diff --git a/acl/acl.go b/acl/acl.go index 2a6be0e5a..57b64814f 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -62,6 +62,7 @@ type ACL struct { node string operator string quota string + plugin string } // maxPrivilege returns the policy which grants the most privilege @@ -74,6 +75,8 @@ func maxPrivilege(a, b string) string { return PolicyWrite case a == PolicyRead || b == PolicyRead: return PolicyRead + case a == PolicyList || b == PolicyList: + return PolicyList default: return "" } @@ -193,6 +196,9 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) { if policy.Quota != nil { acl.quota = maxPrivilege(acl.quota, policy.Quota.Policy) } + if policy.Plugin != nil { + acl.plugin = maxPrivilege(acl.plugin, policy.Plugin.Policy) + } } // Finalize the namespaces @@ -477,6 +483,38 @@ func (a *ACL) AllowQuotaWrite() bool { } } +// AllowPluginRead checks if read operations are allowed for all plugins +func (a *ACL) AllowPluginRead() bool { + switch { + // ACL is nil only if ACLs are disabled + case a == nil: + return true + case a.management: + return true + case a.plugin == PolicyRead: + return true + default: + return false + } +} + +// AllowPluginList checks if list operations are allowed for all plugins +func (a *ACL) AllowPluginList() bool { + switch { + // ACL is nil only if ACLs are disabled + case a == nil: + return true + case a.management: + return true + case a.plugin == PolicyList: + return true + case a.plugin == PolicyRead: + return true + default: + return false + } +} + // IsManagement checks if this represents a management token func (a *ACL) IsManagement() bool { return a.management diff --git a/acl/policy.go b/acl/policy.go index bb22b45b8..b4925577e 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -13,6 +13,7 @@ const ( // which always takes precedence and supercedes. PolicyDeny = "deny" PolicyRead = "read" + PolicyList = "list" PolicyWrite = "write" ) @@ -22,17 +23,22 @@ const ( // combined we take the union of all capabilities. If the deny capability is present, it // takes precedence and overwrites all other capabilities. 
- NamespaceCapabilityDeny = "deny" - NamespaceCapabilityListJobs = "list-jobs" - NamespaceCapabilityReadJob = "read-job" - NamespaceCapabilitySubmitJob = "submit-job" - NamespaceCapabilityDispatchJob = "dispatch-job" - NamespaceCapabilityReadLogs = "read-logs" - NamespaceCapabilityReadFS = "read-fs" - NamespaceCapabilityAllocExec = "alloc-exec" - NamespaceCapabilityAllocNodeExec = "alloc-node-exec" - NamespaceCapabilityAllocLifecycle = "alloc-lifecycle" - NamespaceCapabilitySentinelOverride = "sentinel-override" + NamespaceCapabilityDeny = "deny" + NamespaceCapabilityListJobs = "list-jobs" + NamespaceCapabilityReadJob = "read-job" + NamespaceCapabilitySubmitJob = "submit-job" + NamespaceCapabilityDispatchJob = "dispatch-job" + NamespaceCapabilityReadLogs = "read-logs" + NamespaceCapabilityReadFS = "read-fs" + NamespaceCapabilityAllocExec = "alloc-exec" + NamespaceCapabilityAllocNodeExec = "alloc-node-exec" + NamespaceCapabilityAllocLifecycle = "alloc-lifecycle" + NamespaceCapabilitySentinelOverride = "sentinel-override" + NamespaceCapabilityCSIRegisterPlugin = "csi-register-plugin" + NamespaceCapabilityCSIWriteVolume = "csi-write-volume" + NamespaceCapabilityCSIReadVolume = "csi-read-volume" + NamespaceCapabilityCSIListVolume = "csi-list-volume" + NamespaceCapabilityCSIMountVolume = "csi-mount-volume" ) var ( @@ -62,6 +68,7 @@ type Policy struct { Node *NodePolicy `hcl:"node"` Operator *OperatorPolicy `hcl:"operator"` Quota *QuotaPolicy `hcl:"quota"` + Plugin *PluginPolicy `hcl:"plugin"` Raw string `hcl:"-"` } @@ -73,7 +80,8 @@ func (p *Policy) IsEmpty() bool { p.Agent == nil && p.Node == nil && p.Operator == nil && - p.Quota == nil + p.Quota == nil && + p.Plugin == nil } // NamespacePolicy is the policy for a specific namespace @@ -106,6 +114,10 @@ type QuotaPolicy struct { Policy string } +type PluginPolicy struct { + Policy string +} + // isPolicyValid makes sure the given string matches one of the valid policies. 
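// Illustrative sketch, not part of the patch: how the new plugin
// stanza composes end to end, using only entry points visible in this
// diff (acl.Parse, acl.NewACL, and the AllowPlugin* methods in acl.go).
// isPolicyValid, directly below, still validates the coarse policy
// names themselves.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/acl"
)

func main() {
	policy, err := acl.Parse(`
plugin {
  policy = "list"
}
`)
	if err != nil {
		panic(err)
	}

	aclObj, err := acl.NewACL(false, []*acl.Policy{policy})
	if err != nil {
		panic(err)
	}

	fmt.Println(aclObj.AllowPluginList()) // true: "list" grants list
	fmt.Println(aclObj.AllowPluginRead()) // false: "list" does not imply
	// read, although "read" implies list (see AllowPluginList above)
}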
func isPolicyValid(policy string) bool { switch policy { @@ -116,13 +128,23 @@ func isPolicyValid(policy string) bool { } } +func (p *PluginPolicy) isValid() bool { + switch p.Policy { + case PolicyDeny, PolicyRead, PolicyList: + return true + default: + return false + } +} + // isNamespaceCapabilityValid ensures the given capability is valid for a namespace policy func isNamespaceCapabilityValid(cap string) bool { switch cap { case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle, - NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec: + NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec, + NamespaceCapabilityCSIReadVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIRegisterPlugin: return true // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride: @@ -135,25 +157,31 @@ func isNamespaceCapabilityValid(cap string) bool { // expandNamespacePolicy provides the equivalent set of capabilities for // a namespace policy func expandNamespacePolicy(policy string) []string { + read := []string{ + NamespaceCapabilityListJobs, + NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, + } + + write := append(read, []string{ + NamespaceCapabilitySubmitJob, + NamespaceCapabilityDispatchJob, + NamespaceCapabilityReadLogs, + NamespaceCapabilityReadFS, + NamespaceCapabilityAllocExec, + NamespaceCapabilityAllocLifecycle, + NamespaceCapabilityCSIMountVolume, + NamespaceCapabilityCSIWriteVolume, + }...) + switch policy { case PolicyDeny: return []string{NamespaceCapabilityDeny} case PolicyRead: - return []string{ - NamespaceCapabilityListJobs, - NamespaceCapabilityReadJob, - } + return read case PolicyWrite: - return []string{ - NamespaceCapabilityListJobs, - NamespaceCapabilityReadJob, - NamespaceCapabilitySubmitJob, - NamespaceCapabilityDispatchJob, - NamespaceCapabilityReadLogs, - NamespaceCapabilityReadFS, - NamespaceCapabilityAllocExec, - NamespaceCapabilityAllocLifecycle, - } + return write default: return nil } @@ -261,5 +289,9 @@ func Parse(rules string) (*Policy, error) { if p.Quota != nil && !isPolicyValid(p.Quota.Policy) { return nil, fmt.Errorf("Invalid quota policy: %#v", p.Quota) } + + if p.Plugin != nil && !p.Plugin.isValid() { + return nil, fmt.Errorf("Invalid plugin policy: %#v", p.Plugin) + } return p, nil } diff --git a/acl/policy_test.go b/acl/policy_test.go index 831e80076..d8d21ac81 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -30,6 +30,8 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, }, }, }, @@ -58,6 +60,9 @@ func TestParse(t *testing.T) { quota { policy = "read" } + plugin { + policy = "read" + } `, "", &Policy{ @@ -68,6 +73,8 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, }, }, { @@ -76,12 +83,16 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, NamespaceCapabilitySubmitJob, 
NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocExec, NamespaceCapabilityAllocLifecycle, + NamespaceCapabilityCSIMountVolume, + NamespaceCapabilityCSIWriteVolume, }, }, { @@ -104,6 +115,9 @@ func TestParse(t *testing.T) { Quota: &QuotaPolicy{ Policy: PolicyRead, }, + Plugin: &PluginPolicy{ + Policy: PolicyRead, + }, }, }, { @@ -246,6 +260,28 @@ func TestParse(t *testing.T) { "Invalid host volume name", nil, }, + { + ` + plugin { + policy = "list" + } + `, + "", + &Policy{ + Plugin: &PluginPolicy{ + Policy: PolicyList, + }, + }, + }, + { + ` + plugin { + policy = "reader" + } + `, + "Invalid plugin policy", + nil, + }, } for idx, tc := range tcases { diff --git a/api/allocations.go b/api/allocations.go index 27a6d5b1f..3552390a7 100644 --- a/api/allocations.go +++ b/api/allocations.go @@ -399,6 +399,36 @@ type NodeScoreMeta struct { NormScore float64 } +// Stub returns a list stub for the allocation +func (a *Allocation) Stub() *AllocationListStub { + return &AllocationListStub{ + ID: a.ID, + EvalID: a.EvalID, + Name: a.Name, + Namespace: a.Namespace, + NodeID: a.NodeID, + NodeName: a.NodeName, + JobID: a.JobID, + JobType: *a.Job.Type, + JobVersion: *a.Job.Version, + TaskGroup: a.TaskGroup, + DesiredStatus: a.DesiredStatus, + DesiredDescription: a.DesiredDescription, + ClientStatus: a.ClientStatus, + ClientDescription: a.ClientDescription, + TaskStates: a.TaskStates, + DeploymentStatus: a.DeploymentStatus, + FollowupEvalID: a.FollowupEvalID, + RescheduleTracker: a.RescheduleTracker, + PreemptedAllocations: a.PreemptedAllocations, + PreemptedByAllocation: a.PreemptedByAllocation, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + CreateTime: a.CreateTime, + ModifyTime: a.ModifyTime, + } +} + // AllocationListStub is used to return a subset of an allocation // during list operations. 
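// Illustrative note, not part of the patch: Stub dereferences a.Job
// (for example JobType: *a.Job.Type), so it must only be called on
// allocations that still carry their Job, such as the claimed
// allocations that the CSIVolume allocs helper (later in this diff)
// stubs for the UI. stubOrNil is a hypothetical defensive wrapper; the
// wire shape it returns is the AllocationListStub defined next.
package example

import "github.com/hashicorp/nomad/api"

func stubOrNil(a *api.Allocation) *api.AllocationListStub {
	if a == nil || a.Job == nil {
		return nil
	}
	return a.Stub()
}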
type AllocationListStub struct { @@ -477,18 +507,23 @@ func (a AllocIndexSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Allocation) GetTaskGroup() *TaskGroup { + for _, tg := range a.Job.TaskGroups { + if *tg.Name == a.TaskGroup { + return tg + } + } + return nil +} + // RescheduleInfo is used to calculate remaining reschedule attempts // according to the given time and the task groups reschedule policy func (a Allocation) RescheduleInfo(t time.Time) (int, int) { - var reschedulePolicy *ReschedulePolicy - for _, tg := range a.Job.TaskGroups { - if *tg.Name == a.TaskGroup { - reschedulePolicy = tg.ReschedulePolicy - } - } - if reschedulePolicy == nil { + tg := a.GetTaskGroup() + if tg == nil || tg.ReschedulePolicy == nil { return 0, 0 } + reschedulePolicy := tg.ReschedulePolicy availableAttempts := *reschedulePolicy.Attempts interval := *reschedulePolicy.Interval attempted := 0 diff --git a/api/contexts/contexts.go b/api/contexts/contexts.go index 51b257c40..ae40db3f8 100644 --- a/api/contexts/contexts.go +++ b/api/contexts/contexts.go @@ -11,5 +11,7 @@ const ( Nodes Context = "nodes" Namespaces Context = "namespaces" Quotas Context = "quotas" + Plugins Context = "plugins" + Volumes Context = "volumes" All Context = "all" ) diff --git a/api/csi.go b/api/csi.go new file mode 100644 index 000000000..b78019659 --- /dev/null +++ b/api/csi.go @@ -0,0 +1,256 @@ +package api + +import ( + "sort" + "time" +) + +// CSIVolumes is used to query the top level csi volumes +type CSIVolumes struct { + client *Client +} + +// CSIVolumes returns a handle on the CSIVolumes endpoint +func (c *Client) CSIVolumes() *CSIVolumes { + return &CSIVolumes{client: c} +} + +// List returns all CSI volumes +func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, error) { + var resp []*CSIVolumeListStub + qm, err := v.client.query("/v1/volumes?type=csi", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(CSIVolumeIndexSort(resp)) + return resp, qm, nil +} + +// PluginList returns all CSI volumes for the specified plugin id +func (v *CSIVolumes) PluginList(pluginID string) ([]*CSIVolumeListStub, *QueryMeta, error) { + return v.List(&QueryOptions{Prefix: pluginID}) +} + +// Info is used to retrieve a single CSIVolume +func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, error) { + var resp CSIVolume + qm, err := v.client.query("/v1/volume/csi/"+id, &resp, q) + if err != nil { + return nil, nil, err + } + + // Cleanup allocation representation for the ui + resp.allocs() + + return &resp, qm, nil +} + +func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) { + req := CSIVolumeRegisterRequest{ + Volumes: []*CSIVolume{vol}, + } + meta, err := v.client.write("/v1/volume/csi/"+vol.ID, req, nil, w) + return meta, err +} + +func (v *CSIVolumes) Deregister(id string, w *WriteOptions) error { + _, err := v.client.delete("/v1/volume/csi/"+id, nil, w) + return err +} + +// CSIVolumeAttachmentMode duplicated in nomad/structs/csi.go +type CSIVolumeAttachmentMode string + +const ( + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +) + +// CSIVolumeAccessMode duplicated in nomad/structs/csi.go +type CSIVolumeAccessMode string + +const ( + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = 
"single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" +) + +type CSIMountOptions struct { + FSType string `hcl:"fs_type"` + MountFlags []string `hcl:"mount_flags"` + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` // report unexpected keys +} + +// CSIVolume is used for serialization, see also nomad/structs/csi.go +type CSIVolume struct { + ID string + Name string + ExternalID string `hcl:"external_id"` + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode `hcl:"access_mode"` + AttachmentMode CSIVolumeAttachmentMode `hcl:"attachment_mode"` + MountOptions *CSIMountOptions `hcl:"mount_options"` + + // Allocations, tracking claim status + ReadAllocs map[string]*Allocation + WriteAllocs map[string]*Allocation + + // Combine structs.{Read,Write}Allocs + Allocations []*AllocationListStub + + // Schedulable is true if all the denormalized plugin health fields are true + Schedulable bool + PluginID string `hcl:"plugin_id"` + Provider string + ProviderVersion string + ControllerRequired bool + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time + + CreateIndex uint64 + ModifyIndex uint64 + + // ExtraKeysHCL is used by the hcl parser to report unexpected keys + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` +} + +// allocs is called after we query the volume (creating this CSIVolume struct) to collapse +// allocations for the UI +func (v *CSIVolume) allocs() { + for _, a := range v.WriteAllocs { + v.Allocations = append(v.Allocations, a.Stub()) + } + for _, a := range v.ReadAllocs { + v.Allocations = append(v.Allocations, a.Stub()) + } +} + +type CSIVolumeIndexSort []*CSIVolumeListStub + +func (v CSIVolumeIndexSort) Len() int { + return len(v) +} + +func (v CSIVolumeIndexSort) Less(i, j int) bool { + return v[i].CreateIndex > v[j].CreateIndex +} + +func (v CSIVolumeIndexSort) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} + +// CSIVolumeListStub omits allocations. 
See also nomad/structs/csi.go +type CSIVolumeListStub struct { + ID string + Namespace string + Name string + ExternalID string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions + Schedulable bool + PluginID string + Provider string + ControllerRequired bool + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +type CSIVolumeRegisterRequest struct { + Volumes []*CSIVolume + WriteRequest +} + +type CSIVolumeDeregisterRequest struct { + VolumeIDs []string + WriteRequest +} + +// CSI Plugins are jobs with plugin specific data +type CSIPlugins struct { + client *Client +} + +type CSIPlugin struct { + ID string + Provider string + Version string + ControllerRequired bool + // Map Node.ID to CSIInfo fingerprint results + Controllers map[string]*CSIInfo + Nodes map[string]*CSIInfo + Allocations []*AllocationListStub + ControllersHealthy int + NodesHealthy int + CreateIndex uint64 + ModifyIndex uint64 +} + +type CSIPluginListStub struct { + ID string + Provider string + ControllerRequired bool + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 +} + +type CSIPluginIndexSort []*CSIPluginListStub + +func (v CSIPluginIndexSort) Len() int { + return len(v) +} + +func (v CSIPluginIndexSort) Less(i, j int) bool { + return v[i].CreateIndex > v[j].CreateIndex +} + +func (v CSIPluginIndexSort) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} + +// CSIPlugins returns a handle on the CSIPlugins endpoint +func (c *Client) CSIPlugins() *CSIPlugins { + return &CSIPlugins{client: c} +} + +// List returns all CSI plugins +func (v *CSIPlugins) List(q *QueryOptions) ([]*CSIPluginListStub, *QueryMeta, error) { + var resp []*CSIPluginListStub + qm, err := v.client.query("/v1/plugins?type=csi", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(CSIPluginIndexSort(resp)) + return resp, qm, nil +} + +// Info is used to retrieve a single CSI Plugin Job +func (v *CSIPlugins) Info(id string, q *QueryOptions) (*CSIPlugin, *QueryMeta, error) { + var resp *CSIPlugin + qm, err := v.client.query("/v1/plugin/csi/"+id, &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} diff --git a/api/csi_test.go b/api/csi_test.go new file mode 100644 index 000000000..eeea0f918 --- /dev/null +++ b/api/csi_test.go @@ -0,0 +1,101 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestCSIVolumes_CRUD fails because of a combination of removing the job to plugin creation +// pathway and checking for plugin existence (but not yet health) at registration time. +// There are two possible solutions: +// 1. Expose the test server RPC server and force a Node.Update to fingerprint a plugin +// 2. Build and deploy a dummy CSI plugin via a job, and have it really fingerprint +func TestCSIVolumes_CRUD(t *testing.T) { + t.Parallel() + c, s, root := makeACLClient(t, nil, nil) + defer s.Stop() + v := c.CSIVolumes() + + // Successful empty result + vols, qm, err := v.List(nil) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 0, len(vols)) + + // FIXME we're bailing out here until one of the fixes is available + return + + // Authorized QueryOpts. 
Use the root token to just bypass ACL details + opts := &QueryOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + wpts := &WriteOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + // Create node plugins + nodes, _, err := c.Nodes().List(nil) + require.NoError(t, err) + require.Equal(t, 1, len(nodes)) + + nodeStub := nodes[0] + node, _, err := c.Nodes().Info(nodeStub.ID, nil) + require.NoError(t, err) + node.CSINodePlugins = map[string]*CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + RequiresControllerPlugin: false, + RequiresTopologies: false, + NodeInfo: &CSINodeInfo{ + ID: nodeStub.ID, + MaxVolumes: 200, + }, + }, + } + + // Register a volume + // This id is here as a string to avoid importing helper, which causes the lint + // rule that checks that the api package is isolated to fail + id := "DEADBEEF-31B5-8F78-7986-DD404FDA0CD1" + _, err = v.Register(&CSIVolume{ + ID: id, + Namespace: "default", + PluginID: "foo", + AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}}, + }, wpts) + require.NoError(t, err) + + // Successful result with volumes + vols, qm, err = v.List(opts) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 1, len(vols)) + + // Successful info query + vol, qm, err := v.Info(id, opts) + require.NoError(t, err) + require.Equal(t, "bar", vol.Topologies[0].Segments["foo"]) + + // Deregister the volume + err = v.Deregister(id, wpts) + require.NoError(t, err) + + // Successful empty result + vols, qm, err = v.List(nil) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 0, len(vols)) + + // Failed info query + vol, qm, err = v.Info(id, opts) + require.Error(t, err, "missing") +} diff --git a/api/nodes.go b/api/nodes.go index ec2fed2a3..8ec6f8d0a 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -392,6 +392,16 @@ func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *Que return resp, qm, nil } +func (n *Nodes) CSIVolumes(nodeID string, q *QueryOptions) ([]*CSIVolumeListStub, error) { + var resp []*CSIVolumeListStub + path := fmt.Sprintf("/v1/volumes?type=csi&node_id=%s", nodeID) + if _, err := n.client.query(path, &resp, q); err != nil { + return nil, err + } + + return resp, nil +} + // ForceEvaluate is used to force-evaluate an existing node. func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) { var resp nodeEvalResponse @@ -464,6 +474,8 @@ type Node struct { Events []*NodeEvent Drivers map[string]*DriverInfo HostVolumes map[string]*HostVolumeInfo + CSIControllerPlugins map[string]*CSIInfo + CSINodePlugins map[string]*CSIInfo CreateIndex uint64 ModifyIndex uint64 } @@ -511,6 +523,41 @@ type NodeReservedNetworkResources struct { ReservedHostPorts string } +type CSITopology struct { + Segments map[string]string +} + +// CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Node API. +type CSINodeInfo struct { + ID string + MaxVolumes int64 + AccessibleTopology *CSITopology + RequiresNodeStageVolume bool +} + +// CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Controller API. 
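// Illustrative sketch, not part of the patch: reading the new CSI
// objects through the api client. Only methods introduced in this diff
// (CSIVolumes.List, CSIPlugins.List) plus the pre-existing
// api.NewClient and api.DefaultConfig constructors are assumed. The
// CSIControllerInfo and CSIInfo fingerprint structs follow below.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// GET /v1/volumes?type=csi, sorted newest-first by CreateIndex
	vols, _, err := client.CSIVolumes().List(nil)
	if err != nil {
		panic(err)
	}
	for _, v := range vols {
		fmt.Println(v.ID, v.PluginID, v.Schedulable)
	}

	// GET /v1/plugins?type=csi
	plugins, _, err := client.CSIPlugins().List(nil)
	if err != nil {
		panic(err)
	}
	for _, p := range plugins {
		fmt.Println(p.ID, p.ControllersHealthy, p.NodesHealthy)
	}
}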
+type CSIControllerInfo struct { + SupportsReadOnlyAttach bool + SupportsAttachDetach bool + SupportsListVolumes bool + SupportsListVolumesAttachedNodes bool +} + +// CSIInfo is the current state of a single CSI Plugin. This is updated regularly +// as plugin health changes on the node. +type CSIInfo struct { + PluginID string + Healthy bool + HealthDescription string + UpdateTime time.Time + RequiresControllerPlugin bool + RequiresTopologies bool + ControllerInfo *CSIControllerInfo `json:",omitempty"` + NodeInfo *CSINodeInfo `json:",omitempty"` +} + // DrainStrategy describes a Node's drain behavior. type DrainStrategy struct { // DrainSpec is the user declared drain specification diff --git a/api/tasks.go b/api/tasks.go index 4e05a2cd3..2e6e64d88 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -377,10 +377,12 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy { // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. type VolumeRequest struct { - Name string - Type string - Source string - ReadOnly bool `mapstructure:"read_only"` + Name string + Type string + Source string + ReadOnly bool `hcl:"read_only"` + MountOptions *CSIMountOptions `hcl:"mount_options"` + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` } const ( @@ -643,6 +645,7 @@ type Task struct { Templates []*Template DispatchPayload *DispatchPayloadConfig VolumeMounts []*VolumeMount + CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:"csi_plugin,omitempty"` Leader bool ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` KillSignal string `mapstructure:"kill_signal"` @@ -683,6 +686,9 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { if t.Lifecycle.Empty() { t.Lifecycle = nil } + if t.CSIPluginConfig != nil { + t.CSIPluginConfig.Canonicalize() + } } // TaskArtifact is used to download artifacts before running a task. @@ -909,3 +915,48 @@ type TaskEvent struct { TaskSignal string GenericSource string } + +// CSIPluginType is an enum string that encapsulates the valid options for a +// CSIPlugin stanza's Type. These modes will allow the plugin to be used in +// different ways by the client. +type CSIPluginType string + +const ( + // CSIPluginTypeNode indicates that Nomad should only use the plugin for + // performing Node RPCs against the provided plugin. + CSIPluginTypeNode CSIPluginType = "node" + + // CSIPluginTypeController indicates that Nomad should only use the plugin for + // performing Controller RPCs against the provided plugin. + CSIPluginTypeController CSIPluginType = "controller" + + // CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for + // both controller and node rpcs. + CSIPluginTypeMonolith CSIPluginType = "monolith" +) + +// TaskCSIPluginConfig contains the data that is required to setup a task as a +// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure +// mounts for the plugin and initiate the connection to the plugin catalog. +type TaskCSIPluginConfig struct { + // ID is the identifier of the plugin. + // Ideally this should be the FQDN of the plugin. + ID string `mapstructure:"id"` + + // CSIPluginType instructs Nomad on how to handle processing a plugin + Type CSIPluginType `mapstructure:"type"` + + // MountDir is the destination that nomad should mount in its CSI + // directory for the plugin. It will then expect a file called CSISocketName + // to be created by the plugin, and will provide references into + // "MountDir/CSIIntermediaryDirname/VolumeName/AllocID for mounts. 
+ // + // Default is /csi. + MountDir string `mapstructure:"mount_dir"` +} + +func (t *TaskCSIPluginConfig) Canonicalize() { + if t.MountDir == "" { + t.MountDir = "/csi" + } +} diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index fdd62ad98..364f7b884 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -17,7 +17,9 @@ import ( "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" @@ -118,6 +120,10 @@ type allocRunner struct { // transistions. runnerHooks []interfaces.RunnerHook + // hookState is the output of allocrunner hooks + hookState *cstructs.AllocHookResources + hookStateMu sync.RWMutex + // tasks are the set of task runners tasks map[string]*taskrunner.TaskRunner @@ -134,6 +140,14 @@ type allocRunner struct { // prevAllocMigrator allows the migration of a previous allocations alloc dir. prevAllocMigrator allocwatcher.PrevAllocMigrator + // dynamicRegistry contains all locally registered dynamic plugins (e.g csi + // plugins). + dynamicRegistry dynamicplugins.Registry + + // csiManager is used to wait for CSI Volumes to be attached, and by the task + // runner to manage their mounting + csiManager csimanager.Manager + // devicemanager is used to mount devices as well as lookup device // statistics devicemanager devicemanager.Manager @@ -148,6 +162,15 @@ type allocRunner struct { serversContactedCh chan struct{} taskHookCoordinator *taskHookCoordinator + + // rpcClient is the RPC Client that should be used by the allocrunner and its + // hooks to communicate with Nomad Servers. + rpcClient RPCer +} + +// RPCer is the interface needed by hooks to make RPC calls. +type RPCer interface { + RPC(method string, args interface{}, reply interface{}) error } // NewAllocRunner returns a new allocation runner. 
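// Illustrative sketch, not part of the patch: how the csi_plugin task
// stanza above canonicalizes. "example.csi.driver" is a hypothetical
// plugin ID; the only behavior exercised is the MountDir default
// applied by Canonicalize.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

func main() {
	cfg := &api.TaskCSIPluginConfig{
		ID:   "example.csi.driver",
		Type: api.CSIPluginTypeMonolith, // serves both controller and node RPCs
	}
	cfg.Canonicalize()
	fmt.Println(cfg.MountDir) // "/csi"
}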
@@ -178,9 +201,12 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { deviceStatsReporter: config.DeviceStatsReporter, prevAllocWatcher: config.PrevAllocWatcher, prevAllocMigrator: config.PrevAllocMigrator, + dynamicRegistry: config.DynamicRegistry, + csiManager: config.CSIManager, devicemanager: config.DeviceManager, driverManager: config.DriverManager, serversContactedCh: config.ServersContactedCh, + rpcClient: config.RPCClient, } // Create the logger based on the allocation ID @@ -218,10 +244,12 @@ func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error { Logger: ar.logger, StateDB: ar.stateDB, StateUpdater: ar, + DynamicRegistry: ar.dynamicRegistry, Consul: ar.consulClient, ConsulSI: ar.sidsClient, Vault: ar.vaultClient, DeviceStatsReporter: ar.deviceStatsReporter, + CSIManager: ar.csiManager, DeviceManager: ar.devicemanager, DriverManager: ar.driverManager, ServersContactedCh: ar.serversContactedCh, diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 81c1e7cee..af9e6b61c 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -7,11 +7,41 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocrunner/interfaces" clientconfig "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) +type hookResourceSetter interface { + GetAllocHookResources() *cstructs.AllocHookResources + SetAllocHookResources(*cstructs.AllocHookResources) +} + +type allocHookResourceSetter struct { + ar *allocRunner +} + +func (a *allocHookResourceSetter) GetAllocHookResources() *cstructs.AllocHookResources { + a.ar.hookStateMu.RLock() + defer a.ar.hookStateMu.RUnlock() + + return a.ar.hookState +} + +func (a *allocHookResourceSetter) SetAllocHookResources(res *cstructs.AllocHookResources) { + a.ar.hookStateMu.Lock() + defer a.ar.hookStateMu.Unlock() + + a.ar.hookState = res + + // Propagate to all of the TRs within the lock to ensure consistent state. + // TODO: Refactor so TR's pull state from AR? 
+ for _, tr := range a.ar.tasks { + tr.SetAllocHookResources(res) + } +} + type networkIsolationSetter interface { SetNetworkIsolation(*drivers.NetworkIsolationSpec) } @@ -105,6 +135,10 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { // create network isolation setting shim ns := &allocNetworkIsolationSetter{ar: ar} + // create hook resource setting shim + hrs := &allocHookResourceSetter{ar: ar} + hrs.SetAllocHookResources(&cstructs.AllocHookResources{}) + // build the network manager nm, err := newNetworkManager(ar.Alloc(), ar.driverManager) if err != nil { @@ -134,6 +168,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { logger: hookLogger, }), newConsulSockHook(hookLogger, alloc, ar.allocDir, config.ConsulConfig), + newCSIHook(hookLogger, alloc, ar.rpcClient, ar.csiManager, hrs), } return nil diff --git a/client/allocrunner/config.go b/client/allocrunner/config.go index a9240b3a3..8eb013ede 100644 --- a/client/allocrunner/config.go +++ b/client/allocrunner/config.go @@ -6,7 +6,9 @@ import ( clientconfig "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/client/vaultclient" @@ -48,6 +50,14 @@ type Config struct { // PrevAllocMigrator allows the migration of a previous allocations alloc dir PrevAllocMigrator allocwatcher.PrevAllocMigrator + // DynamicRegistry contains all locally registered dynamic plugins (e.g csi + // plugins). + DynamicRegistry dynamicplugins.Registry + + // CSIManager is used to wait for CSI Volumes to be attached, and by the task + // runner to manage their mounting + CSIManager csimanager.Manager + // DeviceManager is used to mount devices as well as lookup device // statistics DeviceManager devicemanager.Manager @@ -58,4 +68,8 @@ type Config struct { // ServersContactedCh is closed when the first GetClientAllocs call to // servers succeeds and allocs are synced. ServersContactedCh chan struct{} + + // RPCClient is the RPC Client that should be used by the allocrunner and its + // hooks to communicate with Nomad Servers. + RPCClient RPCer } diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go new file mode 100644 index 000000000..ac16cbbe5 --- /dev/null +++ b/client/allocrunner/csi_hook.go @@ -0,0 +1,218 @@ +package allocrunner + +import ( + "context" + "fmt" + + hclog "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" + "github.com/hashicorp/nomad/nomad/structs" +) + +// csiHook will wait for remote csi volumes to be attached to the host before +// continuing. +// +// It is a noop for allocs that do not depend on CSI Volumes. 
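// Illustrative sketch, not part of the patch, of the handoff the
// csiHook (defined next) participates in: Prerun publishes one
// MountInfo per volume alias into the alloc hook resources, and the
// task-level volume hook later reads them back via GetCSIMounts (see
// volume_hook.go at the end of this diff). The mount path here is
// hypothetical.
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/client/pluginmanager/csimanager"
	cstructs "github.com/hashicorp/nomad/client/structs"
)

func main() {
	res := &cstructs.AllocHookResources{
		CSIMounts: map[string]*csimanager.MountInfo{
			"data": {Source: "/csi/per-alloc/<alloc-id>/<volume-id>/rw-file-system"},
		},
	}
	fmt.Println(res.CSIMounts["data"].Source)
}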
+type csiHook struct { + alloc *structs.Allocation + logger hclog.Logger + csimanager csimanager.Manager + rpcClient RPCer + updater hookResourceSetter +} + +func (c *csiHook) Name() string { + return "csi_hook" +} + +func (c *csiHook) Prerun() error { + if !c.shouldRun() { + return nil + } + + ctx := context.TODO() + volumes, err := c.claimVolumesFromAlloc() + if err != nil { + return fmt.Errorf("claim volumes: %v", err) + } + + mounts := make(map[string]*csimanager.MountInfo, len(volumes)) + for alias, pair := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, pair.volume) + if err != nil { + return err + } + + usageOpts := &csimanager.UsageOptions{ + ReadOnly: pair.request.ReadOnly, + AttachmentMode: string(pair.volume.AttachmentMode), + AccessMode: string(pair.volume.AccessMode), + MountOptions: pair.request.MountOptions, + } + + mountInfo, err := mounter.MountVolume(ctx, pair.volume, c.alloc, usageOpts, pair.publishContext) + if err != nil { + return err + } + + mounts[alias] = mountInfo + } + + res := c.updater.GetAllocHookResources() + res.CSIMounts = mounts + c.updater.SetAllocHookResources(res) + + return nil +} + +func (c *csiHook) Postrun() error { + if !c.shouldRun() { + return nil + } + + ctx := context.TODO() + volumes, err := c.csiVolumesFromAlloc() + if err != nil { + return err + } + + // For Postrun, we accumulate all unmount errors, rather than stopping on the + // first failure. This is because we want to make a best effort to free all + // storage, and in some cases there may be incorrect errors from volumes that + // never mounted correctly during prerun when an alloc is failed. It may also + // fail because a volume was externally deleted while in use by this alloc. + var result *multierror.Error + + for _, pair := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, pair.volume) + if err != nil { + result = multierror.Append(result, err) + continue + } + + usageOpts := &csimanager.UsageOptions{ + ReadOnly: pair.request.ReadOnly, + AttachmentMode: string(pair.volume.AttachmentMode), + AccessMode: string(pair.volume.AccessMode), + } + + err = mounter.UnmountVolume(ctx, pair.volume, c.alloc, usageOpts) + if err != nil { + result = multierror.Append(result, err) + continue + } + } + + return result.ErrorOrNil() +} + +type volumeAndRequest struct { + volume *structs.CSIVolume + request *structs.VolumeRequest + + // When volumeAndRequest was returned from a volume claim, this field will be + // populated for plugins that require it. + publishContext map[string]string +} + +// claimVolumesFromAlloc is used by the pre-run hook to fetch all of the volume +// metadata and claim it for use by this alloc/node at the same time. +func (c *csiHook) claimVolumesFromAlloc() (map[string]*volumeAndRequest, error) { + result := make(map[string]*volumeAndRequest) + tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup) + + // Initially, populate the result map with all of the requests + for alias, volumeRequest := range tg.Volumes { + if volumeRequest.Type == structs.VolumeTypeCSI { + result[alias] = &volumeAndRequest{request: volumeRequest} + } + } + + // Iterate over the result map and upsert the volume field as each volume gets + // claimed by the server. 
+ for alias, pair := range result { + claimType := structs.CSIVolumeClaimWrite + if pair.request.ReadOnly { + claimType = structs.CSIVolumeClaimRead + } + + req := &structs.CSIVolumeClaimRequest{ + VolumeID: pair.request.Source, + AllocationID: c.alloc.ID, + Claim: claimType, + } + req.Region = c.alloc.Job.Region + + var resp structs.CSIVolumeClaimResponse + if err := c.rpcClient.RPC("CSIVolume.Claim", req, &resp); err != nil { + return nil, err + } + + if resp.Volume == nil { + return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", pair.request.Source) + } + + result[alias].volume = resp.Volume + result[alias].publishContext = resp.PublishContext + } + + return result, nil +} + +// csiVolumesFromAlloc finds all the CSI Volume requests from the allocation's +// task group and then fetches them from the Nomad Server, before returning +// them in the form of map[RequestedAlias]*volumeAndRequest. This allows us to +// thread the request context through to determine usage options for each volume. +// +// If any volume fails to validate then we return an error. +func (c *csiHook) csiVolumesFromAlloc() (map[string]*volumeAndRequest, error) { + vols := make(map[string]*volumeAndRequest) + tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup) + for alias, vol := range tg.Volumes { + if vol.Type == structs.VolumeTypeCSI { + vols[alias] = &volumeAndRequest{request: vol} + } + } + + for alias, pair := range vols { + req := &structs.CSIVolumeGetRequest{ + ID: pair.request.Source, + } + req.Region = c.alloc.Job.Region + + var resp structs.CSIVolumeGetResponse + if err := c.rpcClient.RPC("CSIVolume.Get", req, &resp); err != nil { + return nil, err + } + + if resp.Volume == nil { + return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", pair.request.Source) + } + + vols[alias].volume = resp.Volume + } + + return vols, nil +} + +func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer, csi csimanager.Manager, updater hookResourceSetter) *csiHook { + return &csiHook{ + alloc: alloc, + logger: logger.Named("csi_hook"), + rpcClient: rpcClient, + csimanager: csi, + updater: updater, + } +} + +func (h *csiHook) shouldRun() bool { + tg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup) + for _, vol := range tg.Volumes { + if vol.Type == structs.VolumeTypeCSI { + return true + } + } + + return false +} diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go new file mode 100644 index 000000000..7d730ead7 --- /dev/null +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -0,0 +1,373 @@ +package taskrunner + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + ti "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/drivers" +) + +// csiPluginSupervisorHook manages supervising plugins that are running as Nomad +// tasks. These plugins will be fingerprinted and it will manage connecting them +// to their requisite plugin manager. +// +// It provides a couple of things to a task running inside Nomad. These are: +// * A mount to the `plugin_mount_dir`, that will then be used by Nomad +// to connect to the nested plugin and handle volume mounts.
+// * When the task has started, it starts a loop of attempting to connect to the +// plugin, to perform initial fingerprinting of the plugins capabilities before +// notifying the plugin manager of the plugin. +type csiPluginSupervisorHook struct { + logger hclog.Logger + alloc *structs.Allocation + task *structs.Task + runner *TaskRunner + mountPoint string + + // eventEmitter is used to emit events to the task + eventEmitter ti.EventEmitter + + shutdownCtx context.Context + shutdownCancelFn context.CancelFunc + + running bool + runningLock sync.Mutex + + // previousHealthstate is used by the supervisor goroutine to track historic + // health states for gating task events. + previousHealthState bool +} + +// The plugin supervisor uses the PrestartHook mechanism to setup the requisite +// mount points and configuration for the task that exposes a CSI plugin. +var _ interfaces.TaskPrestartHook = &csiPluginSupervisorHook{} + +// The plugin supervisor uses the PoststartHook mechanism to start polling the +// plugin for readiness and supported functionality before registering the +// plugin with the catalog. +var _ interfaces.TaskPoststartHook = &csiPluginSupervisorHook{} + +// The plugin supervisor uses the StopHook mechanism to deregister the plugin +// with the catalog and to ensure any mounts are cleaned up. +var _ interfaces.TaskStopHook = &csiPluginSupervisorHook{} + +func newCSIPluginSupervisorHook(csiRootDir string, eventEmitter ti.EventEmitter, runner *TaskRunner, logger hclog.Logger) *csiPluginSupervisorHook { + task := runner.Task() + + // The Plugin directory will look something like this: + // . + // .. + // csi.sock - A unix domain socket used to communicate with the CSI Plugin + // staging/ + // {volume-id}/{usage-mode-hash}/ - Intermediary mount point that will be used by plugins that support NODE_STAGE_UNSTAGE capabilities. + // per-alloc/ + // {alloc-id}/{volume-id}/{usage-mode-hash}/ - Mount Point that will be bind-mounted into tasks that utilise the volume + pluginRoot := filepath.Join(csiRootDir, string(task.CSIPluginConfig.Type), task.CSIPluginConfig.ID) + + shutdownCtx, cancelFn := context.WithCancel(context.Background()) + + hook := &csiPluginSupervisorHook{ + alloc: runner.Alloc(), + runner: runner, + logger: logger, + task: task, + mountPoint: pluginRoot, + shutdownCtx: shutdownCtx, + shutdownCancelFn: cancelFn, + eventEmitter: eventEmitter, + } + + return hook +} + +func (*csiPluginSupervisorHook) Name() string { + return "csi_plugin_supervisor" +} + +// Prestart is called before the task is started including after every +// restart. This requires that the mount paths for a plugin be idempotent, +// despite us not knowing the name of the plugin ahead of time. +// Because of this, we use the allocid_taskname as the unique identifier for a +// plugin on the filesystem. +func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, + req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + // Create the mount directory that the container will access if it doesn't + // already exist. Default to only nomad user access. 
+ if err := os.MkdirAll(h.mountPoint, 0700); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create mount point: %v", err) + } + + configMount := &drivers.MountConfig{ + TaskPath: h.task.CSIPluginConfig.MountDir, + HostPath: h.mountPoint, + Readonly: false, + PropagationMode: "bidirectional", + } + devMount := &drivers.MountConfig{ + TaskPath: "/dev", + HostPath: "/dev", + Readonly: false, + } + + mounts := ensureMountpointInserted(h.runner.hookResources.getMounts(), configMount) + mounts = ensureMountpointInserted(mounts, devMount) + + h.runner.hookResources.setMounts(mounts) + + resp.Done = true + return nil +} + +// Poststart is called after the task has started. Poststart is not +// called if the allocation is terminal. +// +// The context is cancelled if the task is killed. +func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.TaskPoststartRequest, _ *interfaces.TaskPoststartResponse) error { + // If we're already running the supervisor routine, then we don't need to try + // and restart it here as it only terminates on `Stop` hooks. + h.runningLock.Lock() + if h.running { + h.runningLock.Unlock() + return nil + } + h.runningLock.Unlock() + + go h.ensureSupervisorLoop(h.shutdownCtx) + return nil +} + +// ensureSupervisorLoop should be called in a goroutine. It will terminate when +// the passed in context is terminated. +// +// The supervisor works by: +// - Initially waiting for the plugin to become available. This loop is expensive +// and may do things like create new gRPC Clients on every iteration. +// - After receiving an initial healthy status, it will inform the plugin catalog +// of the plugin, registering it with the plugins fingerprinted capabilities. +// - We then perform a more lightweight check, simply probing the plugin on a less +// frequent interval to ensure it is still alive, emitting task events when this +// status changes. +// +// Deeper fingerprinting of the plugin is implemented by the csimanager. +func (h *csiPluginSupervisorHook) ensureSupervisorLoop(ctx context.Context) { + h.runningLock.Lock() + if h.running == true { + h.runningLock.Unlock() + return + } + h.running = true + h.runningLock.Unlock() + + defer func() { + h.runningLock.Lock() + h.running = false + h.runningLock.Unlock() + }() + + socketPath := filepath.Join(h.mountPoint, structs.CSISocketName) + t := time.NewTimer(0) + + // Step 1: Wait for the plugin to initially become available. +WAITFORREADY: + for { + select { + case <-ctx.Done(): + return + case <-t.C: + pluginHealthy, err := h.supervisorLoopOnce(ctx, socketPath) + if err != nil || !pluginHealthy { + h.logger.Debug("CSI Plugin not ready", "error", err) + + // Plugin is not yet returning healthy, because we want to optimise for + // quickly bringing a plugin online, we use a short timeout here. + // TODO(dani): Test with more plugins and adjust. + t.Reset(5 * time.Second) + continue + } + + // Mark the plugin as healthy in a task event + h.previousHealthState = pluginHealthy + event := structs.NewTaskEvent(structs.TaskPluginHealthy) + event.SetMessage(fmt.Sprintf("plugin: %s", h.task.CSIPluginConfig.ID)) + h.eventEmitter.EmitEvent(event) + + break WAITFORREADY + } + } + + // Step 2: Register the plugin with the catalog. 
+ deregisterPluginFn, err := h.registerPlugin(socketPath) + if err != nil { + h.logger.Error("CSI Plugin registration failed", "error", err) + event := structs.NewTaskEvent(structs.TaskPluginUnhealthy) + event.SetMessage(fmt.Sprintf("failed to register plugin: %s, reason: %v", h.task.CSIPluginConfig.ID, err)) + h.eventEmitter.EmitEvent(event) + } + + // Step 3: Start the lightweight supervisor loop. + t.Reset(0) + for { + select { + case <-ctx.Done(): + // De-register plugins on task shutdown + deregisterPluginFn() + return + case <-t.C: + pluginHealthy, err := h.supervisorLoopOnce(ctx, socketPath) + if err != nil { + h.logger.Error("CSI Plugin fingerprinting failed", "error", err) + } + + // The plugin has transitioned to a healthy state. Emit an event. + if !h.previousHealthState && pluginHealthy { + event := structs.NewTaskEvent(structs.TaskPluginHealthy) + event.SetMessage(fmt.Sprintf("plugin: %s", h.task.CSIPluginConfig.ID)) + h.eventEmitter.EmitEvent(event) + } + + // The plugin has transitioned to an unhealthy state. Emit an event. + if h.previousHealthState && !pluginHealthy { + event := structs.NewTaskEvent(structs.TaskPluginUnhealthy) + if err != nil { + event.SetMessage(fmt.Sprintf("error: %v", err)) + } else { + event.SetMessage("Unknown Reason") + } + h.eventEmitter.EmitEvent(event) + } + + h.previousHealthState = pluginHealthy + + // This loop is informational and in some plugins this may be expensive to + // validate. We use a longer timeout (30s) to avoid causing undue work. + t.Reset(30 * time.Second) + } + } +} + +func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), error) { + + // At this point we know the plugin is ready and we can fingerprint it + // to get its vendor name and version + client, err := csi.NewClient(socketPath, h.logger.Named("csi_client").With("plugin.name", h.task.CSIPluginConfig.ID, "plugin.type", h.task.CSIPluginConfig.Type)) + defer client.Close() + if err != nil { + return nil, fmt.Errorf("failed to create csi client: %v", err) + } + + info, err := client.PluginInfo() + if err != nil { + return nil, fmt.Errorf("failed to probe plugin: %v", err) + } + + mkInfoFn := func(pluginType string) *dynamicplugins.PluginInfo { + return &dynamicplugins.PluginInfo{ + Type: pluginType, + Name: h.task.CSIPluginConfig.ID, + Version: info.PluginVersion, + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{ + SocketPath: socketPath, + }, + AllocID: h.alloc.ID, + Options: map[string]string{ + "Provider": info.Name, // vendor name + "MountPoint": h.mountPoint, + "ContainerMountPoint": h.task.CSIPluginConfig.MountDir, + }, + } + } + + registrations := []*dynamicplugins.PluginInfo{} + + switch h.task.CSIPluginConfig.Type { + case structs.CSIPluginTypeController: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSIController)) + case structs.CSIPluginTypeNode: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSINode)) + case structs.CSIPluginTypeMonolith: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSIController)) + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSINode)) + } + + deregistrationFns := []func(){} + + for _, reg := range registrations { + if err := h.runner.dynamicRegistry.RegisterPlugin(reg); err != nil { + for _, fn := range deregistrationFns { + fn() + } + return nil, err + } + + // need to rebind these so that each deregistration function + // closes over its own registration + rname := reg.Name + rtype := reg.Type + 
deregistrationFns = append(deregistrationFns, func() { + err := h.runner.dynamicRegistry.DeregisterPlugin(rtype, rname) + if err != nil { + h.logger.Error("failed to deregister csi plugin", "name", rname, "type", rtype, "error", err) + } + }) + } + + return func() { + for _, fn := range deregistrationFns { + fn() + } + }, nil +} + +func (h *csiPluginSupervisorHook) supervisorLoopOnce(ctx context.Context, socketPath string) (bool, error) { + _, err := os.Stat(socketPath) + if err != nil { + return false, fmt.Errorf("failed to stat socket: %v", err) + } + + client, err := csi.NewClient(socketPath, h.logger.Named("csi_client").With("plugin.name", h.task.CSIPluginConfig.ID, "plugin.type", h.task.CSIPluginConfig.Type)) + defer client.Close() + if err != nil { + return false, fmt.Errorf("failed to create csi client: %v", err) + } + + healthy, err := client.PluginProbe(ctx) + if err != nil { + return false, fmt.Errorf("failed to probe plugin: %v", err) + } + + return healthy, nil +} + +// Stop is called after the task has exited and will not be started +// again. It is the only hook guaranteed to be executed whenever +// TaskRunner.Run is called (and not gracefully shutting down). +// Therefore it may be called even when prestart and the other hooks +// have not. +// +// Stop hooks must be idempotent. The context is cancelled prematurely if the +// task is killed. +func (h *csiPluginSupervisorHook) Stop(_ context.Context, req *interfaces.TaskStopRequest, _ *interfaces.TaskStopResponse) error { + h.shutdownCancelFn() + return nil +} + +func ensureMountpointInserted(mounts []*drivers.MountConfig, mount *drivers.MountConfig) []*drivers.MountConfig { + for _, mnt := range mounts { + if mnt.IsEqual(mount) { + return mounts + } + } + + mounts = append(mounts, mount) + return mounts +} diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index a24b634e5..e8a054e4c 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -19,7 +19,9 @@ import ( "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" @@ -186,6 +188,9 @@ type TaskRunner struct { // deviceStatsReporter is used to lookup resource usage for alloc devices deviceStatsReporter cinterfaces.DeviceStatsReporter + // csiManager is used to manage the mounting of CSI volumes into tasks + csiManager csimanager.Manager + // devicemanager is used to mount devices as well as lookup device // statistics devicemanager devicemanager.Manager @@ -194,6 +199,9 @@ type TaskRunner struct { // handlers driverManager drivermanager.Manager + // dynamicRegistry is where dynamic plugins should be registered. + dynamicRegistry dynamicplugins.Registry + // maxEvents is the capacity of the TaskEvents on the TaskState. // Defaults to defaultMaxEvents but overrideable for testing. 
maxEvents int @@ -212,6 +220,8 @@ type TaskRunner struct { networkIsolationLock sync.Mutex networkIsolationSpec *drivers.NetworkIsolationSpec + + allocHookResources *cstructs.AllocHookResources } type Config struct { @@ -227,6 +237,9 @@ type Config struct { // ConsulSI is the client to use for managing Consul SI tokens ConsulSI consul.ServiceIdentityAPI + // DynamicRegistry is where dynamic plugins should be registered. + DynamicRegistry dynamicplugins.Registry + // Vault is the client to use to derive and renew Vault tokens Vault vaultclient.VaultClient @@ -239,6 +252,9 @@ type Config struct { // deviceStatsReporter is used to lookup resource usage for alloc devices DeviceStatsReporter cinterfaces.DeviceStatsReporter + // CSIManager is used to manage the mounting of CSI volumes into tasks + CSIManager csimanager.Manager + // DeviceManager is used to mount devices as well as lookup device // statistics DeviceManager devicemanager.Manager @@ -285,6 +301,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { taskName: config.Task.Name, taskLeader: config.Task.Leader, envBuilder: envBuilder, + dynamicRegistry: config.DynamicRegistry, consulClient: config.Consul, siClient: config.ConsulSI, vaultClient: config.Vault, @@ -299,6 +316,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { shutdownCtxCancel: trCancel, triggerUpdateCh: make(chan struct{}, triggerUpdateChCap), waitCh: make(chan struct{}), + csiManager: config.CSIManager, devicemanager: config.DeviceManager, driverManager: config.DriverManager, maxEvents: defaultMaxEvents, @@ -1392,3 +1410,7 @@ func (tr *TaskRunner) TaskExecHandler() drivermanager.TaskExecHandler { func (tr *TaskRunner) DriverCapabilities() (*drivers.Capabilities, error) { return tr.driver.Capabilities() } + +func (tr *TaskRunner) SetAllocHookResources(res *cstructs.AllocHookResources) { + tr.allocHookResources = res +} diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index 549b8316e..470ecd2db 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -3,6 +3,7 @@ package taskrunner import ( "context" "fmt" + "path/filepath" "sync" "time" @@ -69,6 +70,11 @@ func (tr *TaskRunner) initHooks() { newDeviceHook(tr.devicemanager, hookLogger), } + // If the task has a CSI stanza, add the hook. 
+ if task.CSIPluginConfig != nil { + tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook(filepath.Join(tr.clientConfig.StateDir, "csi"), tr, tr, hookLogger)) + } + // If Vault is enabled, add the hook if task.Vault != nil { tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{ diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 1e0935aea..5447c5029 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -7,14 +7,16 @@ import ( log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocrunner/interfaces" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) type volumeHook struct { - alloc *structs.Allocation - runner *TaskRunner - logger log.Logger + alloc *structs.Allocation + runner *TaskRunner + logger log.Logger + taskEnv *taskenv.TaskEnv } func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook { @@ -34,6 +36,8 @@ func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, cli var result error for _, req := range requestedByAlias { + // This is a defensive check, but this function should only ever receive + // host-type volumes. if req.Type != structs.VolumeTypeHost { continue } @@ -55,8 +59,16 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM for _, m := range taskMounts { req, ok := taskVolumesByAlias[m.Volume] if !ok { - // Should never happen unless we misvalidated on job submission - return nil, fmt.Errorf("No group volume declaration found named: %s", m.Volume) + // This function receives only the task volumes that are of type Host, + // if we can't find a group volume then we assume the mount is for another + // type. + continue + } + + // This is a defensive check, but this function should only ever receive + // host-type volumes. + if req.Type != structs.VolumeTypeHost { + continue } hostVolume, ok := clientVolumesByName[req.Source] @@ -77,22 +89,100 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM return mounts, nil } -func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { - volumes := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes - mounts := h.runner.hookResources.getMounts() +// partitionVolumesByType takes a map of volume-alias to volume-request and +// returns them in the form of volume-type:(volume-alias:volume-request) +func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map[string]*structs.VolumeRequest { + result := make(map[string]map[string]*structs.VolumeRequest) + for name, req := range xs { + txs, ok := result[req.Type] + if !ok { + txs = make(map[string]*structs.VolumeRequest) + result[req.Type] = txs + } + txs[name] = req + } + return result +} + +func (h *volumeHook) prepareHostVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) { hostVolumes := h.runner.clientConfig.Node.HostVolumes // Always validate volumes to ensure that we do not allow volumes to be used // if a host is restarted and loses the host volume configuration. 
if err := validateHostVolumes(volumes, hostVolumes); err != nil { h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes) - return fmt.Errorf("host volume validation error: %v", err) + return nil, fmt.Errorf("host volume validation error: %v", err) } - requestedMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes) + hostVolumeMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes) + if err != nil { + h.logger.Error("Failed to generate host volume mounts", "error", err) + return nil, err + } + + return hostVolumeMounts, nil +} + +// partitionMountsByVolume takes a list of volume mounts and returns them in the +// form of volume-alias:[]volume-mount because one volume may be mounted multiple +// times. +func partitionMountsByVolume(xs []*structs.VolumeMount) map[string][]*structs.VolumeMount { + result := make(map[string][]*structs.VolumeMount) + for _, mount := range xs { + result[mount.Volume] = append(result[mount.Volume], mount) + } + + return result +} + +func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) { + if len(volumes) == 0 { + return nil, nil + } + + var mounts []*drivers.MountConfig + + mountRequests := partitionMountsByVolume(req.Task.VolumeMounts) + csiMountPoints := h.runner.allocHookResources.GetCSIMounts() + for alias, request := range volumes { + mountsForAlias, ok := mountRequests[alias] + if !ok { + // This task doesn't use the volume + continue + } + + csiMountPoint, ok := csiMountPoints[alias] + if !ok { + return nil, fmt.Errorf("No CSI Mount Point found for volume: %s", alias) + } + + for _, m := range mountsForAlias { + mcfg := &drivers.MountConfig{ + HostPath: csiMountPoint.Source, + TaskPath: m.Destination, + Readonly: request.ReadOnly || m.ReadOnly, + } + mounts = append(mounts, mcfg) + } + } + + return mounts, nil +} + +func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + h.taskEnv = req.TaskEnv + interpolateVolumeMounts(req.Task.VolumeMounts, h.taskEnv) + + volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes) + + hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost]) + if err != nil { + return err + } + + csiVolumeMounts, err := h.prepareCSIVolumes(req, volumes[structs.VolumeTypeCSI]) if err != nil { - h.logger.Error("Failed to generate volume mounts", "error", err) return err } @@ -100,17 +190,22 @@ func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartR // already exist. Although this loop is somewhat expensive, there are only // a small number of mounts that exist within most individual tasks. 
We may // want to revisit this using a `hookdata` param to be "mount only once" -REQUESTED: - for _, m := range requestedMounts { - for _, em := range mounts { - if em.IsEqual(m) { - continue REQUESTED - } - } - - mounts = append(mounts, m) + mounts := h.runner.hookResources.getMounts() + for _, m := range hostVolumeMounts { + mounts = ensureMountpointInserted(mounts, m) + } + for _, m := range csiVolumeMounts { + mounts = ensureMountpointInserted(mounts, m) } - h.runner.hookResources.setMounts(mounts) + return nil } + +func interpolateVolumeMounts(mounts []*structs.VolumeMount, taskEnv *taskenv.TaskEnv) { + for _, mount := range mounts { + mount.Volume = taskEnv.ReplaceEnv(mount.Volume) + mount.Destination = taskEnv.ReplaceEnv(mount.Destination) + mount.PropagationMode = taskEnv.ReplaceEnv(mount.PropagationMode) + } +} diff --git a/client/allocrunner/taskrunner/volume_hook_test.go b/client/allocrunner/taskrunner/volume_hook_test.go new file mode 100644 index 000000000..abe3a5848 --- /dev/null +++ b/client/allocrunner/taskrunner/volume_hook_test.go @@ -0,0 +1,182 @@ +package taskrunner + +import ( + "testing" + + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers" + "github.com/stretchr/testify/require" +) + +func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { + mounts := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + }, + { + Volume: "foo", + Destination: "/bar", + ReadOnly: false, + }, + { + Volume: "baz", + Destination: "/baz", + ReadOnly: false, + }, + } + + expected := map[string][]*structs.VolumeMount{ + "foo": { + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + }, + { + Volume: "foo", + Destination: "/bar", + ReadOnly: false, + }, + }, + "baz": { + { + Volume: "baz", + Destination: "/baz", + ReadOnly: false, + }, + }, + } + + // Test with a real collection + + partitioned := partitionMountsByVolume(mounts) + require.Equal(t, expected, partitioned) + + // Test with nil/emptylist + + partitioned = partitionMountsByVolume(nil) + require.Equal(t, map[string][]*structs.VolumeMount{}, partitioned) +} + +func TestVolumeHook_prepareCSIVolumes(t *testing.T) { + req := &interfaces.TaskPrestartRequest{ + Task: &structs.Task{ + VolumeMounts: []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/bar", + }, + }, + }, + } + + volumes := map[string]*structs.VolumeRequest{ + "foo": { + Type: "csi", + Source: "my-test-volume", + }, + } + + tr := &TaskRunner{ + allocHookResources: &cstructs.AllocHookResources{ + CSIMounts: map[string]*csimanager.MountInfo{ + "foo": { + Source: "/mnt/my-test-volume", + }, + }, + }, + } + + expected := []*drivers.MountConfig{ + { + HostPath: "/mnt/my-test-volume", + TaskPath: "/bar", + }, + } + + hook := &volumeHook{ + logger: testlog.HCLogger(t), + alloc: structs.MockAlloc(), + runner: tr, + } + mounts, err := hook.prepareCSIVolumes(req, volumes) + require.NoError(t, err) + require.Equal(t, expected, mounts) +} + +func TestVolumeHook_Interpolation(t *testing.T) { + + alloc := mock.Alloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").SetHookEnv("volume", + map[string]string{ + 
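+ // hook-provided env vars; interpolateVolumeMounts is expected to resolve + // ${PROPAGATION_MODE} and ${VOLUME_ID} below, alongside normal task env such + // as ${NOMAD_JOB_NAME}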
"PROPAGATION_MODE": "private", + "VOLUME_ID": "my-other-volume", + }, + ).Build() + + mounts := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/bar-${NOMAD_JOB_NAME}", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "${VOLUME_ID}", + Destination: "/baz", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/quux", + ReadOnly: false, + PropagationMode: "${PROPAGATION_MODE}", + }, + } + + expected := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/bar-my-job", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "my-other-volume", + Destination: "/baz", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/quux", + ReadOnly: false, + PropagationMode: "private", + }, + } + + interpolateVolumeMounts(mounts, taskEnv) + require.Equal(t, expected, mounts) +} diff --git a/client/client.go b/client/client.go index 6996875f4..96100fa44 100644 --- a/client/client.go +++ b/client/client.go @@ -26,8 +26,10 @@ import ( "github.com/hashicorp/nomad/client/config" consulApi "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/fingerprint" "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/client/servers" "github.com/hashicorp/nomad/client/state" @@ -42,6 +44,7 @@ import ( "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" nconfig "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/drivers" vaultapi "github.com/hashicorp/vault/api" @@ -258,6 +261,9 @@ type Client struct { // pluginManagers is the set of PluginManagers registered by the client pluginManagers *pluginmanager.PluginGroup + // csimanager is responsible for managing csi plugins. + csimanager csimanager.Manager + // devicemanger is responsible for managing device plugins. devicemanager devicemanager.Manager @@ -279,6 +285,10 @@ type Client struct { // successfully run once. serversContactedCh chan struct{} serversContactedOnce sync.Once + + // dynamicRegistry provides access to plugins that are dynamically registered + // with a nomad client. Currently only used for CSI. 
+ dynamicRegistry dynamicplugins.Registry } var ( @@ -336,6 +346,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic c.batchNodeUpdates = newBatchNodeUpdates( c.updateNodeFromDriver, c.updateNodeFromDevices, + c.updateNodeFromCSI, ) // Initialize the server manager @@ -344,11 +355,22 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic // Start server manager rebalancing go routine go c.servers.Start() - // Initialize the client + // initialize the client if err := c.init(); err != nil { return nil, fmt.Errorf("failed to initialize client: %v", err) } + // initialize the dynamic registry (needs to happen after init) + c.dynamicRegistry = + dynamicplugins.NewRegistry(c.stateDB, map[string]dynamicplugins.PluginDispenser{ + dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "controller")) + }, + dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "client")) + }, // TODO(tgross): refactor these dispenser constructors into csimanager to tidy it up + }) + // Setup the clients RPC server c.setupClientRpc() @@ -383,6 +405,16 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic allowlistDrivers := cfg.ReadStringListToMap("driver.whitelist") blocklistDrivers := cfg.ReadStringListToMap("driver.blacklist") + // Setup the csi manager + csiConfig := &csimanager.Config{ + Logger: c.logger, + DynamicRegistry: c.dynamicRegistry, + UpdateNodeCSIInfoFunc: c.batchNodeUpdates.updateNodeFromCSI, + } + csiManager := csimanager.New(csiConfig) + c.csimanager = csiManager + c.pluginManagers.RegisterAndRun(csiManager.PluginManager()) + // Setup the driver manager driverConfig := &drivermanager.Config{ Logger: c.logger, @@ -1054,9 +1086,12 @@ func (c *Client) restoreState() error { Vault: c.vaultClient, PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, + DynamicRegistry: c.dynamicRegistry, + CSIManager: c.csimanager, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, ServersContactedCh: c.serversContactedCh, + RPCClient: c, } c.configLock.RUnlock() @@ -1279,6 +1314,12 @@ func (c *Client) setupNode() error { if node.Drivers == nil { node.Drivers = make(map[string]*structs.DriverInfo) } + if node.CSIControllerPlugins == nil { + node.CSIControllerPlugins = make(map[string]*structs.CSIInfo) + } + if node.CSINodePlugins == nil { + node.CSINodePlugins = make(map[string]*structs.CSIInfo) + } if node.Meta == nil { node.Meta = make(map[string]string) } @@ -2310,8 +2351,11 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error DeviceStatsReporter: c, PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, + DynamicRegistry: c.dynamicRegistry, + CSIManager: c.csimanager, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, + RPCClient: c, } c.configLock.RUnlock() diff --git a/client/csi_controller_endpoint.go b/client/csi_controller_endpoint.go new file mode 100644 index 000000000..d1c25c3f0 --- /dev/null +++ b/client/csi_controller_endpoint.go @@ -0,0 +1,164 @@ +package client + +import ( + "context" + "errors" + "time" + + metrics "github.com/armon/go-metrics" + 
"github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +// CSIController endpoint is used for interacting with CSI plugins on a client. +// TODO: Submit metrics with labels to allow debugging per plugin perf problems. +type CSIController struct { + c *Client +} + +const ( + // CSIPluginRequestTimeout is the timeout that should be used when making reqs + // against CSI Plugins. It is copied from Kubernetes as an initial seed value. + // https://github.com/kubernetes/kubernetes/blob/e680ad7156f263a6d8129cc0117fda58602e50ad/pkg/volume/csi/csi_plugin.go#L52 + CSIPluginRequestTimeout = 2 * time.Minute +) + +var ( + ErrPluginTypeError = errors.New("CSI Plugin loaded incorrectly") +) + +// ValidateVolume is used during volume registration to validate +// that a volume exists and that the capabilities it was registered with are +// supported by the CSI Plugin and external volume configuration. +func (c *CSIController) ValidateVolume(req *structs.ClientCSIControllerValidateVolumeRequest, resp *structs.ClientCSIControllerValidateVolumeResponse) error { + defer metrics.MeasureSince([]string{"client", "csi_controller", "validate_volume"}, time.Now()) + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.PluginID == "" { + return errors.New("PluginID is required") + } + + plugin, err := c.findControllerPlugin(req.PluginID) + if err != nil { + return err + } + defer plugin.Close() + + caps, err := csi.VolumeCapabilityFromStructs(req.AttachmentMode, req.AccessMode) + if err != nil { + return err + } + + ctx, cancelFn := c.requestContext() + defer cancelFn() + return plugin.ControllerValidateCapabilties(ctx, req.VolumeID, caps) +} + +// AttachVolume is used to attach a volume from a CSI Cluster to +// the storage node provided in the request. +// +// The controller attachment flow currently works as follows: +// 1. Validate the volume request +// 2. Call ControllerPublishVolume on the CSI Plugin to trigger a remote attachment +// +// In the future this may be expanded to request dynamic secrets for attachment. +func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { + defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) + plugin, err := c.findControllerPlugin(req.PluginID) + if err != nil { + return err + } + defer plugin.Close() + + // The following block of validation checks should not be reached on a + // real Nomad cluster as all of this data should be validated when registering + // volumes with the cluster. They serve as a defensive check before forwarding + // requests to plugins, and to aid with development. + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.ClientCSINodeID == "" { + return errors.New("ClientCSINodeID is required") + } + + csiReq, err := req.ToCSIRequest() + if err != nil { + return err + } + + // Submit the request for a volume to the CSI Plugin. + ctx, cancelFn := c.requestContext() + defer cancelFn() + cresp, err := plugin.ControllerPublishVolume(ctx, csiReq) + if err != nil { + return err + } + + resp.PublishContext = cresp.PublishContext + return nil +} + +// DetachVolume is used to detach a volume from a CSI Cluster from +// the storage node provided in the request. 
+func (c *CSIController) DetachVolume(req *structs.ClientCSIControllerDetachVolumeRequest, resp *structs.ClientCSIControllerDetachVolumeResponse) error { + defer metrics.MeasureSince([]string{"client", "csi_controller", "unpublish_volume"}, time.Now()) + plugin, err := c.findControllerPlugin(req.PluginID) + if err != nil { + return err + } + defer plugin.Close() + + // The following block of validation checks should not be reached on a + // real Nomad cluster as all of this data should be validated when registering + // volumes with the cluster. They serve as a defensive check before forwarding + // requests to plugins, and to aid with development. + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.ClientCSINodeID == "" { + return errors.New("ClientCSINodeID is required") + } + + csiReq := req.ToCSIRequest() + + // Submit the request for a volume to the CSI Plugin. + ctx, cancelFn := c.requestContext() + defer cancelFn() + _, err = plugin.ControllerUnpublishVolume(ctx, csiReq) + if err != nil { + return err + } + + return nil +} + +func (c *CSIController) findControllerPlugin(name string) (csi.CSIPlugin, error) { + return c.findPlugin(dynamicplugins.PluginTypeCSIController, name) +} + +// TODO: Cache Plugin Clients? +func (c *CSIController) findPlugin(ptype, name string) (csi.CSIPlugin, error) { + pIface, err := c.c.dynamicRegistry.DispensePlugin(ptype, name) + if err != nil { + return nil, err + } + + plugin, ok := pIface.(csi.CSIPlugin) + if !ok { + return nil, ErrPluginTypeError + } + + return plugin, nil +} + +func (c *CSIController) requestContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), CSIPluginRequestTimeout) +} diff --git a/client/csi_controller_endpoint_test.go b/client/csi_controller_endpoint_test.go new file mode 100644 index 000000000..777a87e4b --- /dev/null +++ b/client/csi_controller_endpoint_test.go @@ -0,0 +1,348 @@ +package client + +import ( + "errors" + "testing" + + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/structs" + nstructs "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" +) + +var fakePlugin = &dynamicplugins.PluginInfo{ + Name: "test-plugin", + Type: "csi-controller", + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, +} + +func TestCSIController_AttachVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerAttachVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerAttachVolumeResponse + }{ + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), + }, + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "validates nodeid is not empty", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + }, + ExpectedErr: 
errors.New("ClientCSINodeID is required"), + }, + { + Name: "validates AccessMode", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessMode("foo"), + }, + ExpectedErr: errors.New("Unknown volume access mode: foo"), + }, + { + Name: "validates attachmentmode is not empty", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), + }, + ExpectedErr: errors.New("Unknown volume attachment mode: bar"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + }, + ExpectedErr: errors.New("hello"), + }, + { + Name: "handles nil PublishContext", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{} + }, + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + }, + ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, + }, + { + Name: "handles non-nil PublishContext", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{"foo": "bar"}, + } + }, + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + }, + ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ + PublishContext: map[string]string{"foo": "bar"}, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { + tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerAttachVolumeResponse + err = client.ClientRPC("CSIController.AttachVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} + +func 
TestCSIController_ValidateVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerValidateVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerValidateVolumeResponse + }{ + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, + VolumeID: "foo", + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), + }, + { + Name: "validates attachmentmode", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), + AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, + }, + ExpectedErr: errors.New("Unknown volume attachment mode: bar"), + }, + { + Name: "validates AccessMode", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessMode("foo"), + }, + ExpectedErr: errors.New("Unknown volume access mode: foo"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerValidateVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + }, + ExpectedErr: errors.New("hello"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { + tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerValidateVolumeResponse + err = client.ClientRPC("CSIController.ValidateVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} + +func TestCSIController_DetachVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerDetachVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerDetachVolumeResponse + }{ + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not 
found"), + }, + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "validates nodeid is not empty", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + }, + ExpectedErr: errors.New("ClientCSINodeID is required"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerUnpublishVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + }, + ExpectedErr: errors.New("hello"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { + tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerDetachVolumeResponse + err = client.ClientRPC("CSIController.DetachVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} diff --git a/client/devicemanager/manager.go b/client/devicemanager/manager.go index c0fade17d..afc22847b 100644 --- a/client/devicemanager/manager.go +++ b/client/devicemanager/manager.go @@ -128,7 +128,7 @@ func New(c *Config) *manager { // PluginType identifies this manager to the plugin manager and satisfies the PluginManager interface. func (*manager) PluginType() string { return base.PluginTypeDevice } -// Run starts thed device manager. The manager will shutdown any previously +// Run starts the device manager. The manager will shutdown any previously // launched plugin and then begin fingerprinting and stats collection on all new // device plugins. func (m *manager) Run() { diff --git a/client/devicemanager/state/state.go b/client/devicemanager/state/state.go index e74be11ac..20eb789f6 100644 --- a/client/devicemanager/state/state.go +++ b/client/devicemanager/state/state.go @@ -2,10 +2,10 @@ package state import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" -// PluginState is used to store the device managers state across restarts of the +// PluginState is used to store the device manager's state across restarts of the // agent type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugin's launched by + // ReattachConfigs are the set of reattach configs for plugins launched by // the device manager ReattachConfigs map[string]*pstructs.ReattachConfig } diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go new file mode 100644 index 000000000..b0739f0b8 --- /dev/null +++ b/client/dynamicplugins/registry.go @@ -0,0 +1,421 @@ +// dynamicplugins is a package that manages dynamic plugins in Nomad. 
+// It exposes a registry that allows for plugins to be registered/deregistered +// and also allows subscribers to receive real time updates of these events. +package dynamicplugins + +import ( + "context" + "errors" + "fmt" + "sync" +) + +const ( + PluginTypeCSIController = "csi-controller" + PluginTypeCSINode = "csi-node" +) + +// Registry is an interface that allows for the dynamic registration of plugins +// that are running as Nomad Tasks. +type Registry interface { + RegisterPlugin(info *PluginInfo) error + DeregisterPlugin(ptype, name string) error + + ListPlugins(ptype string) []*PluginInfo + DispensePlugin(ptype, name string) (interface{}, error) + + PluginsUpdatedCh(ctx context.Context, ptype string) <-chan *PluginUpdateEvent + + Shutdown() + + StubDispenserForType(ptype string, dispenser PluginDispenser) +} + +// RegistryState is what we persist in the client state store. It contains +// a map of plugin types to maps of plugin name -> PluginInfo. +type RegistryState struct { + Plugins map[string]map[string]*PluginInfo +} + +type PluginDispenser func(info *PluginInfo) (interface{}, error) + +// NewRegistry takes a map of `plugintype` to PluginDispenser functions +// that should be used to vend clients for plugins to be used. +func NewRegistry(state StateStorage, dispensers map[string]PluginDispenser) Registry { + + registry := &dynamicRegistry{ + plugins: make(map[string]map[string]*PluginInfo), + broadcasters: make(map[string]*pluginEventBroadcaster), + dispensers: dispensers, + state: state, + } + + // populate the state and initial broadcasters if we have an + // existing state DB to restore + if state != nil { + storedState, err := state.GetDynamicPluginRegistryState() + if err == nil && storedState != nil { + registry.plugins = storedState.Plugins + for ptype := range registry.plugins { + registry.broadcasterForPluginType(ptype) + } + } + } + + return registry +} + +// StateStorage is used to persist the dynamic plugin registry's state +// across agent restarts. +type StateStorage interface { + // GetDynamicPluginRegistryState is used to restore the registry state + GetDynamicPluginRegistryState() (*RegistryState, error) + + // PutDynamicPluginRegistryState is used to store the registry state + PutDynamicPluginRegistryState(state *RegistryState) error +} + +// PluginInfo is the metadata that is stored by the registry for a given plugin. +type PluginInfo struct { + Name string + Type string + Version string + + // ConnectionInfo should only be used externally during `RegisterPlugin` and + // may not be exposed in the future. + ConnectionInfo *PluginConnectionInfo + + // AllocID tracks the allocation running the plugin + AllocID string + + // Options is used for plugin registrations to pass further metadata along to + // other subsystems + Options map[string]string +} + +// PluginConnectionInfo is the data required to connect to the plugin. +// note: We currently only support Unix Domain Sockets, but this may be expanded +// to support other connection modes in the future. +type PluginConnectionInfo struct { + // SocketPath is the path to the plugins api socket. + SocketPath string +} + +// EventType is the enum of events that will be emitted by a Registry's +// PluginsUpdatedCh. +type EventType string + +const ( + // EventTypeRegistered is emitted by the Registry when a new plugin has been + // registered. + EventTypeRegistered EventType = "registered" + // EventTypeDeregistered is emitted by the Registry when a plugin has been + // removed. 
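+ // (e.g. because its backing allocation stopped or is being replaced)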
+ EventTypeDeregistered EventType = "deregistered" +) + +// PluginUpdateEvent is a struct that is sent over a PluginsUpdatedCh when +// plugins are added or removed from the registry. +type PluginUpdateEvent struct { + EventType EventType + Info *PluginInfo +} + +type dynamicRegistry struct { + plugins map[string]map[string]*PluginInfo + pluginsLock sync.RWMutex + + broadcasters map[string]*pluginEventBroadcaster + broadcastersLock sync.Mutex + + dispensers map[string]PluginDispenser + stubDispensers map[string]PluginDispenser + + state StateStorage +} + +// StubDispenserForType allows test functions to provide alternative plugin +// dispensers to simplify writing tests for higher level Nomad features. +// This function should not be called from production code. +func (d *dynamicRegistry) StubDispenserForType(ptype string, dispenser PluginDispenser) { + // delete from stubs + if dispenser == nil && d.stubDispensers != nil { + delete(d.stubDispensers, ptype) + if len(d.stubDispensers) == 0 { + d.stubDispensers = nil + } + + return + } + + // setup stubs + if d.stubDispensers == nil { + d.stubDispensers = make(map[string]PluginDispenser, 1) + } + + d.stubDispensers[ptype] = dispenser +} + +func (d *dynamicRegistry) RegisterPlugin(info *PluginInfo) error { + if info.Type == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("Plugin.Type must not be empty") + } + + if info.ConnectionInfo == nil { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("Plugin.ConnectionInfo must not be nil") + } + + if info.Name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("Plugin.Name must not be empty") + } + + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + pmap, ok := d.plugins[info.Type] + if !ok { + pmap = make(map[string]*PluginInfo, 1) + d.plugins[info.Type] = pmap + } + + pmap[info.Name] = info + + broadcaster := d.broadcasterForPluginType(info.Type) + event := &PluginUpdateEvent{ + EventType: EventTypeRegistered, + Info: info, + } + broadcaster.broadcast(event) + + return d.sync() +} + +func (d *dynamicRegistry) broadcasterForPluginType(ptype string) *pluginEventBroadcaster { + d.broadcastersLock.Lock() + defer d.broadcastersLock.Unlock() + + broadcaster, ok := d.broadcasters[ptype] + if !ok { + broadcaster = newPluginEventBroadcaster() + d.broadcasters[ptype] = broadcaster + } + + return broadcaster +} + +func (d *dynamicRegistry) DeregisterPlugin(ptype, name string) error { + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + if ptype == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("must specify plugin type to deregister") + } + if name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("must specify plugin name to deregister") + } + + pmap, ok := d.plugins[ptype] + if !ok { + // If this occurs there's a bug in the registration handler. + return fmt.Errorf("no plugins registered for type: %s", ptype) + } + + info, ok := pmap[name] + if !ok { + // plugin already deregistered, don't send events or try re-deleting. 
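+ // (returning nil keeps DeregisterPlugin idempotent, so cleanup paths that + // race or retry are safe)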
+ return nil + } + delete(pmap, name) + + broadcaster := d.broadcasterForPluginType(ptype) + event := &PluginUpdateEvent{ + EventType: EventTypeDeregistered, + Info: info, + } + broadcaster.broadcast(event) + + return d.sync() +} + +func (d *dynamicRegistry) ListPlugins(ptype string) []*PluginInfo { + d.pluginsLock.RLock() + defer d.pluginsLock.RUnlock() + + pmap, ok := d.plugins[ptype] + if !ok { + return nil + } + + plugins := make([]*PluginInfo, 0, len(pmap)) + + for _, info := range pmap { + plugins = append(plugins, info) + } + + return plugins +} + +func (d *dynamicRegistry) DispensePlugin(ptype string, name string) (interface{}, error) { + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + if ptype == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, errors.New("must specify plugin type to dispense") + } + if name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, errors.New("must specify plugin name to dispense") + } + + dispenseFunc, ok := d.dispensers[ptype] + if !ok { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, fmt.Errorf("no plugin dispenser found for type: %s", ptype) + } + + // After initially loading the dispenser (to avoid masking missing setup in + // client/client.go), we then check to see if we have any stub dispensers for + // this plugin type. If we do, then replace the dispenser fn with the stub. + if d.stubDispensers != nil { + if stub, ok := d.stubDispensers[ptype]; ok { + dispenseFunc = stub + } + } + + pmap, ok := d.plugins[ptype] + if !ok { + return nil, fmt.Errorf("no plugins registered for type: %s", ptype) + } + + info, ok := pmap[name] + if !ok { + return nil, fmt.Errorf("plugin %s for type %s not found", name, ptype) + } + + return dispenseFunc(info) +} + +// PluginsUpdatedCh returns a channel over which plugin events for the requested +// plugin type will be emitted. These events are strongly ordered and will never +// be dropped. +// +// The receiving channel _must not_ be closed before the provided context is +// cancelled. 
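+// +// A typical subscriber loop looks roughly like this (sketch; registry and +// handleEvent are placeholders): +// +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// for event := range registry.PluginsUpdatedCh(ctx, PluginTypeCSINode) { +// handleEvent(event) // EventTypeRegistered or EventTypeDeregistered +// }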
+func (d *dynamicRegistry) PluginsUpdatedCh(ctx context.Context, ptype string) <-chan *PluginUpdateEvent { + b := d.broadcasterForPluginType(ptype) + ch := b.subscribe() + go func() { + select { + case <-b.shutdownCh: + return + case <-ctx.Done(): + b.unsubscribe(ch) + } + }() + + return ch +} + +func (d *dynamicRegistry) sync() error { + if d.state != nil { + storedState := &RegistryState{Plugins: d.plugins} + return d.state.PutDynamicPluginRegistryState(storedState) + } + return nil +} + +func (d *dynamicRegistry) Shutdown() { + for _, b := range d.broadcasters { + b.shutdown() + } +} + +type pluginEventBroadcaster struct { + stopCh chan struct{} + shutdownCh chan struct{} + publishCh chan *PluginUpdateEvent + + subscriptions map[chan *PluginUpdateEvent]struct{} + subscriptionsLock sync.RWMutex +} + +func newPluginEventBroadcaster() *pluginEventBroadcaster { + b := &pluginEventBroadcaster{ + stopCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + publishCh: make(chan *PluginUpdateEvent, 1), + subscriptions: make(map[chan *PluginUpdateEvent]struct{}), + } + go b.run() + return b +} + +func (p *pluginEventBroadcaster) run() { + for { + select { + case <-p.stopCh: + close(p.shutdownCh) + return + case msg := <-p.publishCh: + p.subscriptionsLock.RLock() + for msgCh := range p.subscriptions { + select { + case msgCh <- msg: + } + } + p.subscriptionsLock.RUnlock() + } + } +} + +func (p *pluginEventBroadcaster) shutdown() { + close(p.stopCh) + + // Wait for loop to exit before closing subscriptions + <-p.shutdownCh + + p.subscriptionsLock.Lock() + for sub := range p.subscriptions { + delete(p.subscriptions, sub) + close(sub) + } + p.subscriptionsLock.Unlock() +} + +func (p *pluginEventBroadcaster) broadcast(e *PluginUpdateEvent) { + p.publishCh <- e +} + +func (p *pluginEventBroadcaster) subscribe() chan *PluginUpdateEvent { + p.subscriptionsLock.Lock() + defer p.subscriptionsLock.Unlock() + + ch := make(chan *PluginUpdateEvent, 1) + p.subscriptions[ch] = struct{}{} + return ch +} + +func (p *pluginEventBroadcaster) unsubscribe(ch chan *PluginUpdateEvent) { + p.subscriptionsLock.Lock() + defer p.subscriptionsLock.Unlock() + + _, ok := p.subscriptions[ch] + if ok { + delete(p.subscriptions, ch) + close(ch) + } +} diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go new file mode 100644 index 000000000..a2621c05f --- /dev/null +++ b/client/dynamicplugins/registry_test.go @@ -0,0 +1,244 @@ +package dynamicplugins + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { + t.Parallel() + b := newPluginEventBroadcaster() + defer close(b.stopCh) + var rcv1, rcv2 bool + + ch1 := b.subscribe() + ch2 := b.subscribe() + + listenFunc := func(ch chan *PluginUpdateEvent, updateBool *bool) { + select { + case <-ch: + *updateBool = true + } + } + + go listenFunc(ch1, &rcv1) + go listenFunc(ch2, &rcv2) + + b.broadcast(&PluginUpdateEvent{}) + + require.Eventually(t, func() bool { + return rcv1 == true && rcv2 == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { + t.Parallel() + + b := newPluginEventBroadcaster() + defer close(b.stopCh) + var rcv1 bool + + ch1 := b.subscribe() + + listenFunc := func(ch chan *PluginUpdateEvent, updateBool *bool) { + select { + case e := <-ch: + if e == nil { + *updateBool = true + } + } + } + + go listenFunc(ch1, &rcv1) + + b.unsubscribe(ch1) + + 
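+ // The broadcast below should not reach ch1; instead the listener observes + // the channel close (a nil event) and flips rcv1.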
b.broadcast(&PluginUpdateEvent{}) + + require.Eventually(t, func() bool { + return rcv1 == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { + t.Parallel() + r := NewRegistry(nil, nil) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + ch := r.PluginsUpdatedCh(ctx, "csi") + receivedRegistrationEvent := false + + listenFunc := func(ch <-chan *PluginUpdateEvent, updateBool *bool) { + select { + case e := <-ch: + if e == nil { + return + } + + if e.EventType == EventTypeRegistered { + *updateBool = true + } + } + } + + go listenFunc(ch, &receivedRegistrationEvent) + + err := r.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + + require.NoError(t, err) + + require.Eventually(t, func() bool { + return receivedRegistrationEvent == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { + t.Parallel() + r := NewRegistry(nil, nil) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + ch := r.PluginsUpdatedCh(ctx, "csi") + receivedDeregistrationEvent := false + + listenFunc := func(ch <-chan *PluginUpdateEvent, updateBool *bool) { + for { + select { + case e := <-ch: + if e == nil { + return + } + + if e.EventType == EventTypeDeregistered { + *updateBool = true + } + } + } + } + + go listenFunc(ch, &receivedDeregistrationEvent) + + err := r.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.DeregisterPlugin("csi", "my-plugin") + require.NoError(t, err) + + require.Eventually(t, func() bool { + return receivedDeregistrationEvent == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { + dispenseFn := func(i *PluginInfo) (interface{}, error) { + return struct{}{}, nil + } + + registry := NewRegistry(nil, map[string]PluginDispenser{"csi": dispenseFn}) + + err := registry.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + result, err := registry.DispensePlugin("unknown-type", "unknown-name") + require.Nil(t, result) + require.EqualError(t, err, "no plugin dispenser found for type: unknown-type") + + result, err = registry.DispensePlugin("csi", "unknown-name") + require.Nil(t, result) + require.EqualError(t, err, "plugin unknown-name for type csi not found") + + result, err = registry.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) +} + +func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { + t.Parallel() + r := NewRegistry(nil, nil) + + err := r.RegisterPlugin(&PluginInfo{ + Type: PluginTypeCSIController, + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.RegisterPlugin(&PluginInfo{ + Type: PluginTypeCSINode, + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.DeregisterPlugin(PluginTypeCSIController, "my-plugin") + require.NoError(t, err) + require.Equal(t, len(r.ListPlugins(PluginTypeCSINode)), 1) + require.Equal(t, len(r.ListPlugins(PluginTypeCSIController)), 0) +} + +func TestDynamicRegistry_StateStore(t *testing.T) { + t.Parallel() + dispenseFn := func(i *PluginInfo) (interface{}, error) { + return i, nil + } + + memdb := 
&MemDB{} + oldR := NewRegistry(memdb, map[string]PluginDispenser{"csi": dispenseFn}) + + err := oldR.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + result, err := oldR.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) + + // recreate the registry from the state store and query again + newR := NewRegistry(memdb, map[string]PluginDispenser{"csi": dispenseFn}) + result, err = newR.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) +} + +// MemDB implements a StateDB that stores data in memory and should only be +// used for testing. All methods are safe for concurrent use. This is a +// partial implementation of the MemDB in the client/state package, copied +// here to avoid circular dependencies. +type MemDB struct { + dynamicManagerPs *RegistryState + mu sync.RWMutex +} + +func (m *MemDB) GetDynamicPluginRegistryState() (*RegistryState, error) { + m.mu.Lock() + defer m.mu.Unlock() + return m.dynamicManagerPs, nil +} + +func (m *MemDB) PutDynamicPluginRegistryState(ps *RegistryState) error { + m.mu.Lock() + defer m.mu.Unlock() + m.dynamicManagerPs = ps + return nil +} diff --git a/client/node_updater.go b/client/node_updater.go index 702cfe8c2..a87714c92 100644 --- a/client/node_updater.go +++ b/client/node_updater.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/nomad/structs" ) @@ -40,6 +41,23 @@ SEND_BATCH: c.configLock.Lock() defer c.configLock.Unlock() + // csi updates + var csiChanged bool + c.batchNodeUpdates.batchCSIUpdates(func(name string, info *structs.CSIInfo) { + if c.updateNodeFromCSIControllerLocked(name, info) { + if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + } + csiChanged = true + } + if c.updateNodeFromCSINodeLocked(name, info) { + if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { + c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + } + csiChanged = true + } + }) + // driver node updates var driverChanged bool c.batchNodeUpdates.batchDriverUpdates(func(driver string, info *structs.DriverInfo) { @@ -61,13 +79,128 @@ SEND_BATCH: }) // only update the node if changes occurred - if driverChanged || devicesChanged { + if driverChanged || devicesChanged || csiChanged { c.updateNodeLocked() } close(c.fpInitialized) } +// updateNodeFromCSI receives a CSIInfo struct for the plugin and updates the +// node accordingly +func (c *Client) updateNodeFromCSI(name string, info *structs.CSIInfo) { + c.configLock.Lock() + defer c.configLock.Unlock() + + changed := false + + if c.updateNodeFromCSIControllerLocked(name, info) { + if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + } + changed = true + } + + if c.updateNodeFromCSINodeLocked(name, info) { + if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { + c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + } + changed = true + } + + if changed { + c.updateNodeLocked() + } +} + +// updateNodeFromCSIControllerLocked makes the changes to the node from a csi +// update but does not send the update to the server. c.configLock must be held +// before calling this func. 
+// +// It is safe to call for all CSI Updates, but will only perform changes when +// a ControllerInfo field is present. +func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CSIInfo) bool { + var changed bool + if info.ControllerInfo == nil { + return false + } + i := info.Copy() + i.NodeInfo = nil + + oldController, hadController := c.config.Node.CSIControllerPlugins[name] + if !hadController { + // If the controller info has not yet been set, do that here + changed = true + c.config.Node.CSIControllerPlugins[name] = i + } else { + // The controller info has already been set, fix it up + if !oldController.Equal(i) { + c.config.Node.CSIControllerPlugins[name] = i + changed = true + } + + // If health state has changed, trigger node event + if oldController.Healthy != i.Healthy || oldController.HealthDescription != i.HealthDescription { + changed = true + if i.HealthDescription != "" { + event := &structs.NodeEvent{ + Subsystem: "CSI", + Message: i.HealthDescription, + Timestamp: time.Now(), + Details: map[string]string{"plugin": name, "type": "controller"}, + } + c.triggerNodeEvent(event) + } + } + } + + return changed +} + +// updateNodeFromCSINodeLocked makes the changes to the node from a csi +// update but does not send the update to the server. c.configLock must be held +// before calling this func. +// +// It is safe to call for all CSI Updates, but will only perform changes when +// a NodeInfo field is present. +func (c *Client) updateNodeFromCSINodeLocked(name string, info *structs.CSIInfo) bool { + var changed bool + if info.NodeInfo == nil { + return false + } + i := info.Copy() + i.ControllerInfo = nil + + oldNode, hadNode := c.config.Node.CSINodePlugins[name] + if !hadNode { + // If the node info has not yet been set, do that here + changed = true + c.config.Node.CSINodePlugins[name] = i + } else { + // The node info has already been set, fix it up; compare against the + // scrubbed copy i, as the controller path above does + if !oldNode.Equal(i) { + c.config.Node.CSINodePlugins[name] = i + changed = true + } + + // If health state has changed, trigger node event + if oldNode.Healthy != i.Healthy || oldNode.HealthDescription != i.HealthDescription { + changed = true + if i.HealthDescription != "" { + event := &structs.NodeEvent{ + Subsystem: "CSI", + Message: i.HealthDescription, + Timestamp: time.Now(), + Details: map[string]string{"plugin": name, "type": "node"}, + } + c.triggerNodeEvent(event) + } + } + } + + return changed +} + // updateNodeFromDriver receives a DriverInfo struct for the driver and updates // the node accordingly func (c *Client) updateNodeFromDriver(name string, info *structs.DriverInfo) { @@ -187,20 +320,71 @@ type batchNodeUpdates struct { devicesBatched bool devicesCB devicemanager.UpdateNodeDevicesFn devicesMu sync.Mutex + + // access to csi fields must hold csiMu lock + csiNodePlugins map[string]*structs.CSIInfo + csiControllerPlugins map[string]*structs.CSIInfo + csiBatched bool + csiCB csimanager.UpdateNodeCSIInfoFunc + csiMu sync.Mutex } func newBatchNodeUpdates( driverCB drivermanager.UpdateNodeDriverInfoFn, - devicesCB devicemanager.UpdateNodeDevicesFn) *batchNodeUpdates { + devicesCB devicemanager.UpdateNodeDevicesFn, + csiCB csimanager.UpdateNodeCSIInfoFunc) *batchNodeUpdates { return &batchNodeUpdates{ - drivers: make(map[string]*structs.DriverInfo), - driverCB: driverCB, - devices: []*structs.NodeDeviceResource{}, - devicesCB: devicesCB, + drivers: make(map[string]*structs.DriverInfo), + driverCB: driverCB, + devices: []*structs.NodeDeviceResource{}, + devicesCB: devicesCB, +
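+ // csi batching state is initialized the same way as the driver and device + // state above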
csiNodePlugins: make(map[string]*structs.CSIInfo), + csiControllerPlugins: make(map[string]*structs.CSIInfo), + csiCB: csiCB, } } +// updateNodeFromCSI implements csimanager.UpdateNodeCSIInfoFunc and is used in +// the csi manager to send csi fingerprints to the server. +func (b *batchNodeUpdates) updateNodeFromCSI(plugin string, info *structs.CSIInfo) { + b.csiMu.Lock() + defer b.csiMu.Unlock() + if b.csiBatched { + b.csiCB(plugin, info) + return + } + + // Only one of these is expected to be set, but a future implementation that + // explicitly models monolith plugins with a single fingerprinter may set both + if info.ControllerInfo != nil { + b.csiControllerPlugins[plugin] = info + } + + if info.NodeInfo != nil { + b.csiNodePlugins[plugin] = info + } +} + +// batchCSIUpdates sends all of the batched CSI updates by calling f for each +// batched plugin +func (b *batchNodeUpdates) batchCSIUpdates(f csimanager.UpdateNodeCSIInfoFunc) error { + b.csiMu.Lock() + defer b.csiMu.Unlock() + if b.csiBatched { + return fmt.Errorf("csi updates already batched") + } + + b.csiBatched = true + for plugin, info := range b.csiNodePlugins { + f(plugin, info) + } + for plugin, info := range b.csiControllerPlugins { + f(plugin, info) + } + return nil +} + // updateNodeFromDriver implements drivermanager.UpdateNodeDriverInfoFn and is // used in the driver manager to send driver fingerprints to func (b *batchNodeUpdates) updateNodeFromDriver(driver string, info *structs.DriverInfo) { diff --git a/client/pluginmanager/csimanager/doc.go b/client/pluginmanager/csimanager/doc.go new file mode 100644 index 000000000..42400a092 --- /dev/null +++ b/client/pluginmanager/csimanager/doc.go @@ -0,0 +1,15 @@ +/** +csimanager manages locally running CSI Plugins on a Nomad host, and provides a +few different interfaces. + +It provides: +- a pluginmanager.PluginManager implementation that is used to fingerprint and + heartbeat local node plugins +- (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an + external CSIVolume to be attached to the node before returning +- (TODO) a csimanager.NodeController implementation that is used to manage the node-local + portions of the CSI specification, and encompasses volume staging/publishing +- (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure + their volumes are healthy(ish) +*/ +package csimanager diff --git a/client/pluginmanager/csimanager/fingerprint.go b/client/pluginmanager/csimanager/fingerprint.go new file mode 100644 index 000000000..b0da9c8fc --- /dev/null +++ b/client/pluginmanager/csimanager/fingerprint.go @@ -0,0 +1,175 @@ +package csimanager + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +type pluginFingerprinter struct { + logger hclog.Logger + client csi.CSIPlugin + info *dynamicplugins.PluginInfo + + // basicInfo holds a cache of data that should not change for the lifetime of + // a CSI plugin. This lets us minimize the number of requests we make to + // plugins on each run of the fingerprinter, reduces the chance of repeatedly + // performing expensive actions, and keeps the data stable through + // transient failures.
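+ // basicInfo is populated by the first successful buildBasicFingerprint call + // and reused afterwards; each fingerprint run only layers fresh health and + // capability data on top of a copy.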
+ basicInfo *structs.CSIInfo + + fingerprintNode bool + fingerprintController bool + + hadFirstSuccessfulFingerprint bool + // hadFirstSuccessfulFingerprintCh is closed the first time a fingerprint + // is completed successfully. + hadFirstSuccessfulFingerprintCh chan struct{} + + // requiresStaging is set on a first successful fingerprint. It allows the + // csimanager to efficiently query this as it shouldn't change after a plugin + // is started. Removing this bool will require storing a cache of recent successful + // results that can be used by subscribers of the `hadFirstSuccessfulFingerprintCh`. + requiresStaging bool +} + +func (p *pluginFingerprinter) fingerprint(ctx context.Context) *structs.CSIInfo { + if p.basicInfo == nil { + info, err := p.buildBasicFingerprint(ctx) + if err != nil { + // If we receive a fingerprinting error, update the stats with as much + // info as possible and wait for the next fingerprint interval. + info.HealthDescription = fmt.Sprintf("failed initial fingerprint with err: %v", err) + info.Healthy = false + + return info + } + + // If fingerprinting succeeded, we don't need to repopulate the basic + // info again. + p.basicInfo = info + } + + info := p.basicInfo.Copy() + var fp *structs.CSIInfo + var err error + + if p.fingerprintNode { + fp, err = p.buildNodeFingerprint(ctx, info) + } else if p.fingerprintController { + fp, err = p.buildControllerFingerprint(ctx, info) + } + + if err != nil { + info.Healthy = false + info.HealthDescription = fmt.Sprintf("failed fingerprinting with error: %v", err) + } else { + info = fp + if !p.hadFirstSuccessfulFingerprint { + p.hadFirstSuccessfulFingerprint = true + if p.fingerprintNode { + p.requiresStaging = info.NodeInfo.RequiresNodeStageVolume + } + close(p.hadFirstSuccessfulFingerprintCh) + } + } + + return info +} + +func (p *pluginFingerprinter) buildBasicFingerprint(ctx context.Context) (*structs.CSIInfo, error) { + info := &structs.CSIInfo{ + PluginID: p.info.Name, + AllocID: p.info.AllocID, + Provider: p.info.Options["Provider"], + ProviderVersion: p.info.Version, + Healthy: false, + HealthDescription: "initial fingerprint not completed", + } + + if p.fingerprintNode { + info.NodeInfo = &structs.CSINodeInfo{} + } + if p.fingerprintController { + info.ControllerInfo = &structs.CSIControllerInfo{} + } + + capabilities, err := p.client.PluginGetCapabilities(ctx) + if err != nil { + return info, err + } + + info.RequiresControllerPlugin = capabilities.HasControllerService() + info.RequiresTopologies = capabilities.HasToplogies() + + if p.fingerprintNode { + nodeInfo, err := p.client.NodeGetInfo(ctx) + if err != nil { + return info, err + } + + info.NodeInfo.ID = nodeInfo.NodeID + info.NodeInfo.MaxVolumes = nodeInfo.MaxVolumes + info.NodeInfo.AccessibleTopology = structCSITopologyFromCSITopology(nodeInfo.AccessibleTopology) + } + + return info, nil +} + +func applyCapabilitySetToControllerInfo(cs *csi.ControllerCapabilitySet, info *structs.CSIControllerInfo) { + info.SupportsReadOnlyAttach = cs.HasPublishReadonly + info.SupportsAttachDetach = cs.HasPublishUnpublishVolume + info.SupportsListVolumes = cs.HasListVolumes + info.SupportsListVolumesAttachedNodes = cs.HasListVolumesPublishedNodes +} + +func (p *pluginFingerprinter) buildControllerFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := p.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + caps, err := p.client.ControllerGetCapabilities(ctx) + if 
err != nil { + return fp, err + } + applyCapabilitySetToControllerInfo(caps, fp.ControllerInfo) + + return fp, nil +} + +func (p *pluginFingerprinter) buildNodeFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := p.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + caps, err := p.client.NodeGetCapabilities(ctx) + if err != nil { + return fp, err + } + fp.NodeInfo.RequiresNodeStageVolume = caps.HasStageUnstageVolume + + return fp, nil +} + +func structCSITopologyFromCSITopology(a *csi.Topology) *structs.CSITopology { + if a == nil { + return nil + } + + return &structs.CSITopology{ + Segments: helper.CopyMapStringString(a.Segments), + } +} diff --git a/client/pluginmanager/csimanager/fingerprint_test.go b/client/pluginmanager/csimanager/fingerprint_test.go new file mode 100644 index 000000000..4ce5951ea --- /dev/null +++ b/client/pluginmanager/csimanager/fingerprint_test.go @@ -0,0 +1,277 @@ +package csimanager + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/stretchr/testify/require" +) + +func TestBuildBasicFingerprint_Node(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.PluginCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + NodeInfo *csi.NodeGetInfoResponse + NodeInfoErr error + NodeInfoCallCount int64 + + ExpectedCSIInfo *structs.CSIInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: nil, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + }, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: csi.NewTestPluginCapabilitySet(true, false), + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &csi.Topology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + + RequiresTopologies: true, + + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + }, + }, + { + Name: "PluginGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + NodeInfoCallCount: 0, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: errors.New("request failed"), + }, + { + Name: "NodeGetInfo Failed", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfoErr: errors.New("request failed"), + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: 
errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextPluginGetCapabilitiesResponse = test.Capabilities + client.NextPluginGetCapabilitiesErr = test.CapabilitiesErr + + client.NextNodeGetInfoResponse = test.NodeInfo + client.NextNodeGetInfoErr = test.NodeInfoErr + + info, err := im.fp.buildBasicFingerprint(context.TODO()) + + require.Equal(t, test.ExpectedCSIInfo, info) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.PluginGetCapabilitiesCallCount) + require.Equal(t, test.NodeInfoCallCount, client.NodeGetInfoCallCount) + }) + } +} + +func TestBuildControllerFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.ControllerCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ProbeResponse bool + ProbeErr error + ProbeCallCount int64 + + ExpectedControllerInfo *structs.CSIControllerInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.ControllerCapabilitySet{}, + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + }, + { + Name: "Successful response with capabilities", + + Capabilities: &csi.ControllerCapabilitySet{ + HasListVolumes: true, + }, + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{ + SupportsListVolumes: true, + }, + }, + { + Name: "ControllerGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextControllerGetCapabilitiesResponse = test.Capabilities + client.NextControllerGetCapabilitiesErr = test.CapabilitiesErr + + client.NextPluginProbeResponse = test.ProbeResponse + client.NextPluginProbeErr = test.ProbeErr + + info, err := im.fp.buildControllerFingerprint(context.TODO(), &structs.CSIInfo{ControllerInfo: &structs.CSIControllerInfo{}}) + + require.Equal(t, test.ExpectedControllerInfo, info.ControllerInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.ControllerGetCapabilitiesCallCount) + require.Equal(t, test.ProbeCallCount, client.PluginProbeCallCount) + }) + } +} + +func TestBuildNodeFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.NodeCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ExpectedCSINodeInfo *structs.CSINodeInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.NodeCapabilitySet{}, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: false, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: &csi.NodeCapabilitySet{ + HasStageUnstageVolume: true, + }, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: true, + }, + }, + { + Name: "NodeGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{}, + ExpectedErr: errors.New("request failed"), + }, + 
} + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextNodeGetCapabilitiesResponse = test.Capabilities + client.NextNodeGetCapabilitiesErr = test.CapabilitiesErr + + info, err := im.fp.buildNodeFingerprint(context.TODO(), &structs.CSIInfo{NodeInfo: &structs.CSINodeInfo{}}) + + require.Equal(t, test.ExpectedCSINodeInfo, info.NodeInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.NodeGetCapabilitiesCallCount) + }) + } +} diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go new file mode 100644 index 000000000..95f88221f --- /dev/null +++ b/client/pluginmanager/csimanager/instance.go @@ -0,0 +1,157 @@ +package csimanager + +import ( + "context" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/plugins/csi" +) + +const managerFingerprintInterval = 30 * time.Second + +// instanceManager is used to manage the fingerprinting and supervision of a +// single CSI Plugin. +type instanceManager struct { + info *dynamicplugins.PluginInfo + logger hclog.Logger + + updater UpdateNodeCSIInfoFunc + + shutdownCtx context.Context + shutdownCtxCancelFn context.CancelFunc + shutdownCh chan struct{} + + // mountPoint is the root of the mount dir where plugin specific data may be + // stored and where mount points will be created + mountPoint string + + // containerMountPoint is the location _inside_ the plugin container that the + // `mountPoint` is bound in to. + containerMountPoint string + + // AllocID is the allocation id of the task group running the dynamic plugin + allocID string + + fp *pluginFingerprinter + + volumeManager *volumeManager + volumeManagerSetupCh chan struct{} + + client csi.CSIPlugin +} + +func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *dynamicplugins.PluginInfo) *instanceManager { + ctx, cancelFn := context.WithCancel(context.Background()) + logger = logger.Named(p.Name) + return &instanceManager{ + logger: logger, + info: p, + updater: updater, + + fp: &pluginFingerprinter{ + logger: logger.Named("fingerprinter"), + info: p, + fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, + fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + hadFirstSuccessfulFingerprintCh: make(chan struct{}), + }, + + mountPoint: p.Options["MountPoint"], + containerMountPoint: p.Options["ContainerMountPoint"], + allocID: p.AllocID, + + volumeManagerSetupCh: make(chan struct{}), + + shutdownCtx: ctx, + shutdownCtxCancelFn: cancelFn, + shutdownCh: make(chan struct{}), + } +} + +func (i *instanceManager) run() { + c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath, i.logger) + if err != nil { + i.logger.Error("failed to setup instance manager client", "error", err) + close(i.shutdownCh) + return + } + i.client = c + i.fp.client = c + + go i.setupVolumeManager() + go i.runLoop() +} + +func (i *instanceManager) setupVolumeManager() { + if i.info.Type != dynamicplugins.PluginTypeCSINode { + i.logger.Debug("not a node plugin, skipping volume manager setup", "type", i.info.Type) + return + } + + select { + case <-i.shutdownCtx.Done(): + return + case <-i.fp.hadFirstSuccessfulFingerprintCh: + i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, i.containerMountPoint, i.fp.requiresStaging) + i.logger.Debug("volume manager setup complete") + close(i.volumeManagerSetupCh) + return + 
}
+}
+
+// VolumeMounter returns the volume manager that is configured for the given
+// plugin instance. If called before the volume manager has been set up, it
+// will block until the volume manager is ready or the context is closed.
+func (i *instanceManager) VolumeMounter(ctx context.Context) (VolumeMounter, error) {
+	select {
+	case <-i.volumeManagerSetupCh:
+		return i.volumeManager, nil
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+func (i *instanceManager) requestCtxWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) {
+	return context.WithTimeout(i.shutdownCtx, timeout)
+}
+
+func (i *instanceManager) runLoop() {
+	timer := time.NewTimer(0)
+	for {
+		select {
+		case <-i.shutdownCtx.Done():
+			if i.client != nil {
+				i.client.Close()
+				i.client = nil
+			}
+
+			// Run one last fingerprint so that we mark the plugin as
+			// unhealthy. The client has been closed, so this will return
+			// quickly with the plugin's basic info.
+			ctx, cancelFn := i.requestCtxWithTimeout(time.Second)
+			info := i.fp.fingerprint(ctx)
+			cancelFn()
+			if info != nil {
+				i.updater(i.info.Name, info)
+			}
+			close(i.shutdownCh)
+			return
+
+		case <-timer.C:
+			ctx, cancelFn := i.requestCtxWithTimeout(managerFingerprintInterval)
+			info := i.fp.fingerprint(ctx)
+			cancelFn()
+			if info != nil {
+				i.updater(i.info.Name, info)
+			}
+			timer.Reset(managerFingerprintInterval)
+		}
+	}
+}
+
+func (i *instanceManager) shutdown() {
+	i.shutdownCtxCancelFn()
+	<-i.shutdownCh
+}
diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go
new file mode 100644
index 000000000..6a8658df5
--- /dev/null
+++ b/client/pluginmanager/csimanager/instance_test.go
@@ -0,0 +1,82 @@
+package csimanager
+
+import (
+	"context"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/hashicorp/nomad/client/dynamicplugins"
+	"github.com/hashicorp/nomad/helper/testlog"
+	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/hashicorp/nomad/plugins/csi"
+	"github.com/hashicorp/nomad/plugins/csi/fake"
+	"github.com/stretchr/testify/require"
+)
+
+func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) {
+	tp := &fake.Client{}
+
+	logger := testlog.HCLogger(t)
+	pinfo := &dynamicplugins.PluginInfo{
+		Name: "test-plugin",
+	}
+
+	return tp, &instanceManager{
+		logger: logger,
+		info:   pinfo,
+		client: tp,
+		fp: &pluginFingerprinter{
+			logger:                          logger.Named("fingerprinter"),
+			info:                            pinfo,
+			client:                          tp,
+			fingerprintNode:                 true,
+			hadFirstSuccessfulFingerprintCh: make(chan struct{}),
+		},
+	}
+}
+
+func TestInstanceManager_Shutdown(t *testing.T) {
+	var pluginHealth bool
+	var lock sync.Mutex
+	ctx, cancelFn := context.WithCancel(context.Background())
+	client, im := setupTestNodeInstanceManager(t)
+	im.shutdownCtx = ctx
+	im.shutdownCtxCancelFn = cancelFn
+	im.shutdownCh = make(chan struct{})
+	im.updater = func(_ string, info *structs.CSIInfo) {
+		lock.Lock()
+		defer lock.Unlock()
+		pluginHealth = info.Healthy
+	}
+
+	// set up a mock successful fingerprint so that we can get
+	// a healthy plugin before shutting down
+	client.NextPluginGetCapabilitiesResponse = &csi.PluginCapabilitySet{}
+	client.NextPluginGetCapabilitiesErr = nil
+	client.NextNodeGetInfoResponse = &csi.NodeGetInfoResponse{NodeID: "foo"}
+	client.NextNodeGetInfoErr = nil
+	client.NextNodeGetCapabilitiesResponse = &csi.NodeCapabilitySet{}
+	client.NextNodeGetCapabilitiesErr = nil
+	client.NextPluginProbeResponse = true
+
+	go im.runLoop()
+
+	require.Eventually(t, func() bool {
+		lock.Lock()
+		defer lock.Unlock()
+		return pluginHealth
+	}, 1*time.Second, 10*time.Millisecond)
+
+	cancelFn() // fires im.shutdown()
+
+	require.Eventually(t, func() bool {
+		lock.Lock()
+		defer lock.Unlock()
+		return !pluginHealth
+	}, 1*time.Second, 10*time.Millisecond)
+}
diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go
new file mode 100644
index 000000000..c6f97cd69
--- /dev/null
+++ b/client/pluginmanager/csimanager/interface.go
@@ -0,0 +1,64 @@
+package csimanager
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"github.com/hashicorp/nomad/client/pluginmanager"
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+var (
+	PluginNotFoundErr = errors.New("Plugin not found")
+)
+
+type MountInfo struct {
+	Source   string
+	IsDevice bool
+}
+
+type UsageOptions struct {
+	ReadOnly       bool
+	AttachmentMode string
+	AccessMode     string
+	MountOptions   *structs.CSIMountOptions
+}
+
+// ToFS is used by a VolumeManager to construct the path to where a volume
+// should be staged/published. It should always return a string that is easy
+// enough to manage as a filesystem path segment (e.g. avoid starting the
+// string with a special character).
+func (u *UsageOptions) ToFS() string {
+	var sb strings.Builder
+
+	if u.ReadOnly {
+		sb.WriteString("ro-")
+	} else {
+		sb.WriteString("rw-")
+	}
+
+	sb.WriteString(u.AttachmentMode)
+	sb.WriteString("-")
+	sb.WriteString(u.AccessMode)
+
+	return sb.String()
+}
+
+type VolumeMounter interface {
+	MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions, publishContext map[string]string) (*MountInfo, error)
+	UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions) error
+}
+
+type Manager interface {
+	// PluginManager returns a PluginManager for use by the node fingerprinter.
+	PluginManager() pluginmanager.PluginManager
+
+	// MounterForVolume returns a VolumeMounter for the requested volume. If
+	// there is no plugin registered for this volume type, a PluginNotFoundErr
+	// will be returned.
+	MounterForVolume(ctx context.Context, volume *structs.CSIVolume) (VolumeMounter, error)
+
+	// Shutdown shuts down the Manager and unmounts any locally attached volumes.
+	Shutdown()
+}
diff --git a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go
new file mode 100644
index 000000000..60b6b309b
--- /dev/null
+++ b/client/pluginmanager/csimanager/manager.go
@@ -0,0 +1,225 @@
+package csimanager
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/nomad/client/dynamicplugins"
+	"github.com/hashicorp/nomad/client/pluginmanager"
+	"github.com/hashicorp/nomad/nomad/structs"
+)
+
+// defaultPluginResyncPeriod is the time interval used to do a full resync
+// against the dynamic plugin registry, to account for missed updates.
+const defaultPluginResyncPeriod = 30 * time.Second
+
+// UpdateNodeCSIInfoFunc is the callback used to update the node from
+// fingerprinting
+type UpdateNodeCSIInfoFunc func(string, *structs.CSIInfo)
+
+type Config struct {
+	Logger                hclog.Logger
+	DynamicRegistry       dynamicplugins.Registry
+	UpdateNodeCSIInfoFunc UpdateNodeCSIInfoFunc
+	PluginResyncPeriod    time.Duration
+}
+
+// New returns a new PluginManager that manages the CSI plugins registered
+// with the dynamic registry in the provided Config.
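+//
+// A minimal wiring sketch (hypothetical caller; the logger and registry
+// variables are illustrative, not part of this package):
+//
+//	mgr := csimanager.New(&csimanager.Config{
+//		Logger:                logger,
+//		DynamicRegistry:       registry,
+//		UpdateNodeCSIInfoFunc: func(name string, info *structs.CSIInfo) {},
+//	})
+//	mgr.PluginManager().Run()
+//	defer mgr.Shutdown()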
+func New(config *Config) Manager {
+	// Use a dedicated internal context for managing plugin shutdown.
+	ctx, cancelFn := context.WithCancel(context.Background())
+	if config.PluginResyncPeriod == 0 {
+		config.PluginResyncPeriod = defaultPluginResyncPeriod
+	}
+
+	return &csiManager{
+		logger:    config.Logger,
+		registry:  config.DynamicRegistry,
+		instances: make(map[string]map[string]*instanceManager),
+
+		updateNodeCSIInfoFunc: config.UpdateNodeCSIInfoFunc,
+		pluginResyncPeriod:    config.PluginResyncPeriod,
+
+		shutdownCtx:         ctx,
+		shutdownCtxCancelFn: cancelFn,
+		shutdownCh:          make(chan struct{}),
+	}
+}
+
+type csiManager struct {
+	// instances should only be accessed from the run() goroutine and the shutdown
+	// fn. It is a map of PluginType : [PluginName : instanceManager]
+	instances map[string]map[string]*instanceManager
+
+	registry           dynamicplugins.Registry
+	logger             hclog.Logger
+	pluginResyncPeriod time.Duration
+
+	updateNodeCSIInfoFunc UpdateNodeCSIInfoFunc
+
+	shutdownCtx         context.Context
+	shutdownCtxCancelFn context.CancelFunc
+	shutdownCh          chan struct{}
+}
+
+func (c *csiManager) PluginManager() pluginmanager.PluginManager {
+	return c
+}
+
+func (c *csiManager) MounterForVolume(ctx context.Context, vol *structs.CSIVolume) (VolumeMounter, error) {
+	nodePlugins, hasAnyNodePlugins := c.instances["csi-node"]
+	if !hasAnyNodePlugins {
+		return nil, PluginNotFoundErr
+	}
+
+	mgr, hasPlugin := nodePlugins[vol.PluginID]
+	if !hasPlugin {
+		return nil, PluginNotFoundErr
+	}
+
+	return mgr.VolumeMounter(ctx)
+}
+
+// Run starts the plugin manager and returns immediately; the sync work
+// happens in a background goroutine.
+func (c *csiManager) Run() {
+	go c.runLoop()
+}
+
+func (c *csiManager) runLoop() {
+	timer := time.NewTimer(0) // ensure we sync immediately in first pass
+	controllerUpdates := c.registry.PluginsUpdatedCh(c.shutdownCtx, "csi-controller")
+	nodeUpdates := c.registry.PluginsUpdatedCh(c.shutdownCtx, "csi-node")
+	for {
+		select {
+		case <-timer.C:
+			c.resyncPluginsFromRegistry("csi-controller")
+			c.resyncPluginsFromRegistry("csi-node")
+			timer.Reset(c.pluginResyncPeriod)
+		case event := <-controllerUpdates:
+			c.handlePluginEvent(event)
+		case event := <-nodeUpdates:
+			c.handlePluginEvent(event)
+		case <-c.shutdownCtx.Done():
+			close(c.shutdownCh)
+			return
+		}
+	}
+}
+
+// resyncPluginsFromRegistry does a full sync of the running instance managers
+// against those in the registry. Update events from the registry are the
+// primary mechanism; this periodic resync is a safety net for missed updates.
+func (c *csiManager) resyncPluginsFromRegistry(ptype string) {
+	plugins := c.registry.ListPlugins(ptype)
+	seen := make(map[string]struct{}, len(plugins))
+
+	// For every plugin in the registry, ensure that we have an existing plugin
+	// running. Also build the map of valid plugin names.
+	// Note: monolith plugins that run as both controllers and nodes get a
+	// separate instance manager for both modes.
+	for _, plugin := range plugins {
+		seen[plugin.Name] = struct{}{}
+		c.ensureInstance(plugin)
+	}
+
+	// For every instance manager, if we did not find it during the iteration
+	// above, shut it down and remove it from the table.
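+	// (For example, a plugin whose deregistration event was missed will no
+	// longer appear in the registry listing and gets swept up here.)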
+ instances := c.instancesForType(ptype) + for name, mgr := range instances { + if _, ok := seen[name]; !ok { + c.ensureNoInstance(mgr.info) + } + } +} + +// handlePluginEvent syncs a single event against the plugin registry +func (c *csiManager) handlePluginEvent(event *dynamicplugins.PluginUpdateEvent) { + if event == nil { + return + } + c.logger.Trace("dynamic plugin event", + "event", event.EventType, + "plugin_id", event.Info.Name, + "plugin_alloc_id", event.Info.AllocID) + + switch event.EventType { + case dynamicplugins.EventTypeRegistered: + c.ensureInstance(event.Info) + case dynamicplugins.EventTypeDeregistered: + c.ensureNoInstance(event.Info) + default: + c.logger.Error("received unknown dynamic plugin event type", + "type", event.EventType) + } +} + +// Ensure we have an instance manager for the plugin and add it to +// the CSI manager's tracking table for that plugin type. +func (c *csiManager) ensureInstance(plugin *dynamicplugins.PluginInfo) { + name := plugin.Name + ptype := plugin.Type + instances := c.instancesForType(ptype) + if _, ok := instances[name]; !ok { + c.logger.Debug("detected new CSI plugin", "name", name, "type", ptype) + mgr := newInstanceManager(c.logger, c.updateNodeCSIInfoFunc, plugin) + instances[name] = mgr + mgr.run() + } +} + +// Shut down the instance manager for a plugin and remove it from +// the CSI manager's tracking table for that plugin type. +func (c *csiManager) ensureNoInstance(plugin *dynamicplugins.PluginInfo) { + name := plugin.Name + ptype := plugin.Type + instances := c.instancesForType(ptype) + if mgr, ok := instances[name]; ok { + c.logger.Debug("shutting down CSI plugin", "name", name, "type", ptype) + mgr.shutdown() + delete(instances, name) + } +} + +// Get the instance managers table for a specific plugin type, +// ensuring it's been initialized if it doesn't exist. +func (c *csiManager) instancesForType(ptype string) map[string]*instanceManager { + pluginMap, ok := c.instances[ptype] + if !ok { + pluginMap = make(map[string]*instanceManager) + c.instances[ptype] = pluginMap + } + return pluginMap +} + +// Shutdown should gracefully shutdown all plugins managed by the manager. 
+// It must block until shutdown is complete +func (c *csiManager) Shutdown() { + // Shut down the run loop + c.shutdownCtxCancelFn() + + // Wait for plugin manager shutdown to complete so that we + // don't try to shutdown instance managers while runLoop is + // doing a resync + <-c.shutdownCh + + // Shutdown all the instance managers in parallel + var wg sync.WaitGroup + for _, pluginMap := range c.instances { + for _, mgr := range pluginMap { + wg.Add(1) + go func(mgr *instanceManager) { + mgr.shutdown() + wg.Done() + }(mgr) + } + } + wg.Wait() +} + +// PluginType is the type of plugin which the manager manages +func (c *csiManager) PluginType() string { + return "csi" +} diff --git a/client/pluginmanager/csimanager/manager_test.go b/client/pluginmanager/csimanager/manager_test.go new file mode 100644 index 000000000..f6c3f381d --- /dev/null +++ b/client/pluginmanager/csimanager/manager_test.go @@ -0,0 +1,161 @@ +package csimanager + +import ( + "testing" + "time" + + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" +) + +var _ pluginmanager.PluginManager = (*csiManager)(nil) + +var fakePlugin = &dynamicplugins.PluginInfo{ + Name: "my-plugin", + Type: "csi-controller", + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, +} + +func setupRegistry() dynamicplugins.Registry { + return dynamicplugins.NewRegistry( + nil, + map[string]dynamicplugins.PluginDispenser{ + "csi-controller": func(*dynamicplugins.PluginInfo) (interface{}, error) { + return nil, nil + }, + }) +} + +func TestManager_Setup_Shutdown(t *testing.T) { + r := setupRegistry() + defer r.Shutdown() + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: r, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + } + pm := New(cfg).(*csiManager) + pm.Run() + pm.Shutdown() +} + +func TestManager_RegisterPlugin(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + pmap, ok := pm.instances[fakePlugin.Type] + if !ok { + return false + } + + _, ok = pmap[fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) +} + +func TestManager_DeregisterPlugin(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + PluginResyncPeriod: 500 * time.Millisecond, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + err = registry.DeregisterPlugin(fakePlugin.Type, fakePlugin.Name) + require.Nil(t, err) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return !ok + }, 5*time.Second, 10*time.Millisecond) +} + +// 
TestManager_MultiplePlugins ensures that multiple plugins with the same +// name but different types (as found with monolith plugins) don't interfere +// with each other. +func TestManager_MultiplePlugins(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + PluginResyncPeriod: 500 * time.Millisecond, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + fakeNodePlugin := *fakePlugin + fakeNodePlugin.Type = "csi-node" + err = registry.RegisterPlugin(&fakeNodePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakeNodePlugin.Type][fakeNodePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + err = registry.DeregisterPlugin(fakePlugin.Type, fakePlugin.Name) + require.Nil(t, err) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return !ok + }, 5*time.Second, 10*time.Millisecond) +} diff --git a/client/pluginmanager/csimanager/usage_tracker.go b/client/pluginmanager/csimanager/usage_tracker.go new file mode 100644 index 000000000..df97d906e --- /dev/null +++ b/client/pluginmanager/csimanager/usage_tracker.go @@ -0,0 +1,71 @@ +package csimanager + +import ( + "sync" + + "github.com/hashicorp/nomad/nomad/structs" +) + +// volumeUsageTracker tracks the allocations that depend on a given volume +type volumeUsageTracker struct { + // state is a map of volumeUsageKey to a slice of allocation ids + state map[volumeUsageKey][]string + stateMu sync.Mutex +} + +func newVolumeUsageTracker() *volumeUsageTracker { + return &volumeUsageTracker{ + state: make(map[volumeUsageKey][]string), + } +} + +type volumeUsageKey struct { + volume *structs.CSIVolume + usageOpts UsageOptions +} + +func (v *volumeUsageTracker) allocsForKey(key volumeUsageKey) []string { + return v.state[key] +} + +func (v *volumeUsageTracker) appendAlloc(key volumeUsageKey, alloc *structs.Allocation) { + allocs := v.allocsForKey(key) + allocs = append(allocs, alloc.ID) + v.state[key] = allocs +} + +func (v *volumeUsageTracker) removeAlloc(key volumeUsageKey, needle *structs.Allocation) { + allocs := v.allocsForKey(key) + var newAllocs []string + for _, allocID := range allocs { + if allocID != needle.ID { + newAllocs = append(newAllocs, allocID) + } + } + + if len(newAllocs) == 0 { + delete(v.state, key) + } else { + v.state[key] = newAllocs + } +} + +func (v *volumeUsageTracker) Claim(alloc *structs.Allocation, volume *structs.CSIVolume, usage *UsageOptions) { + v.stateMu.Lock() + defer v.stateMu.Unlock() + + key := volumeUsageKey{volume: volume, usageOpts: *usage} + v.appendAlloc(key, alloc) +} + +// Free removes the allocation from the state list for the given alloc. If the +// alloc is the last allocation for the volume then it returns true. 
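+//
+// For example: after Claim(a1, vol, usage) and Claim(a2, vol, usage), calling
+// Free(a1, vol, usage) returns false, and a subsequent Free(a2, vol, usage)
+// returns true, signalling that vol may be unstaged for that usage mode.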
+func (v *volumeUsageTracker) Free(alloc *structs.Allocation, volume *structs.CSIVolume, usage *UsageOptions) bool { + v.stateMu.Lock() + defer v.stateMu.Unlock() + + key := volumeUsageKey{volume: volume, usageOpts: *usage} + v.removeAlloc(key, alloc) + allocs := v.allocsForKey(key) + return len(allocs) == 0 +} diff --git a/client/pluginmanager/csimanager/usage_tracker_test.go b/client/pluginmanager/csimanager/usage_tracker_test.go new file mode 100644 index 000000000..21545e96e --- /dev/null +++ b/client/pluginmanager/csimanager/usage_tracker_test.go @@ -0,0 +1,62 @@ +package csimanager + +import ( + "testing" + + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" +) + +func TestUsageTracker(t *testing.T) { + mockAllocs := []*structs.Allocation{ + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + } + + cases := []struct { + Name string + + RegisterAllocs []*structs.Allocation + FreeAllocs []*structs.Allocation + + ExpectedResult bool + }{ + { + Name: "Register and deregister all allocs", + RegisterAllocs: mockAllocs, + FreeAllocs: mockAllocs, + ExpectedResult: true, + }, + { + Name: "Register all and deregister partial allocs", + RegisterAllocs: mockAllocs, + FreeAllocs: mockAllocs[0:3], + ExpectedResult: false, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tracker := newVolumeUsageTracker() + + volume := &structs.CSIVolume{ + ID: "foo", + } + for _, alloc := range tc.RegisterAllocs { + tracker.Claim(alloc, volume, &UsageOptions{}) + } + + result := false + + for _, alloc := range tc.FreeAllocs { + result = tracker.Free(alloc, volume, &UsageOptions{}) + } + + require.Equal(t, tc.ExpectedResult, result, "Tracker State: %#v", tracker.state) + }) + } +} diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go new file mode 100644 index 000000000..6243af4b6 --- /dev/null +++ b/client/pluginmanager/csimanager/volume.go @@ -0,0 +1,319 @@ +package csimanager + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/helper/mount" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +var _ VolumeMounter = &volumeManager{} + +const ( + DefaultMountActionTimeout = 2 * time.Minute + StagingDirName = "staging" + AllocSpecificDirName = "per-alloc" +) + +// volumeManager handles the state of attached volumes for a given CSI Plugin. +// +// volumeManagers outlive the lifetime of a given allocation as volumes may be +// shared by multiple allocations on the same node. +// +// volumes are stored by an enriched volume usage struct as the CSI Spec requires +// slightly different usage based on the given usage model. +type volumeManager struct { + logger hclog.Logger + plugin csi.CSIPlugin + + usageTracker *volumeUsageTracker + + // mountRoot is the root of where plugin directories and mounts may be created + // e.g /opt/nomad.d/statedir/csi/my-csi-plugin/ + mountRoot string + + // containerMountPoint is the location _inside_ the plugin container that the + // `mountRoot` is bound in to. 
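+	// (Illustrative example, not prescribed values: if mountRoot is
+	// /var/nomad/client/csi/my-plugin on the host and the plugin task binds
+	// that directory at /csi, then containerMountPoint is /csi.)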
+	containerMountPoint string
+
+	// requiresStaging shows whether the plugin requires that the volume manager
+	// calls NodeStageVolume and NodeUnstageVolume RPCs during setup and teardown
+	requiresStaging bool
+}
+
+func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir, containerRootDir string, requiresStaging bool) *volumeManager {
+	return &volumeManager{
+		logger:              logger.Named("volume_manager"),
+		plugin:              plugin,
+		mountRoot:           rootDir,
+		containerMountPoint: containerRootDir,
+		requiresStaging:     requiresStaging,
+		usageTracker:        newVolumeUsageTracker(),
+	}
+}
+
+func (v *volumeManager) stagingDirForVolume(root string, vol *structs.CSIVolume, usage *UsageOptions) string {
+	return filepath.Join(root, StagingDirName, vol.ID, usage.ToFS())
+}
+
+func (v *volumeManager) allocDirForVolume(root string, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) string {
+	return filepath.Join(root, AllocSpecificDirName, alloc.ID, vol.ID, usage.ToFS())
+}
+
+// ensureStagingDir attempts to create a directory for use when staging a volume
+// and then validates that the path is not already a mount point for e.g. an
+// existing volume stage.
+//
+// Returns the staging path, whether the directory is a pre-existing mount
+// point, and any error that occurred.
+func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume, usage *UsageOptions) (string, bool, error) {
+	stagingPath := v.stagingDirForVolume(v.mountRoot, vol, usage)
+
+	// Make the staging path, owned by the Nomad user
+	if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) {
+		return "", false, fmt.Errorf("failed to create staging directory for volume (%s): %v", vol.ID, err)
+	}
+
+	// Validate that it is not already a mount point
+	m := mount.New()
+	isNotMount, err := m.IsNotAMountPoint(stagingPath)
+	if err != nil {
+		return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err)
+	}
+
+	return stagingPath, !isNotMount, nil
+}
+
+// ensureAllocDir attempts to create a directory for use when publishing a volume
+// and then validates that the path is not already a mount point (e.g. when
+// reattaching to existing allocs).
+//
+// Returns the publish path, whether the directory is a pre-existing mount
+// point, and any error that occurred.
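+//
+// As an illustrative example: with mountRoot "/csi/my-plugin", an alloc ID of
+// "<alloc-id>", a volume ID of "vol0", and read-write filesystem usage, the
+// publish path would be:
+//
+//	/csi/my-plugin/per-alloc/<alloc-id>/vol0/rw-file-system-multi-node-multi-writer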
+func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (string, bool, error) { + allocPath := v.allocDirForVolume(v.mountRoot, vol, alloc, usage) + + // Make the alloc path, owned by the Nomad User + if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) { + return "", false, fmt.Errorf("failed to create allocation directory for volume (%s): %v", vol.ID, err) + } + + // Validate that it is not already a mount point + m := mount.New() + isNotMount, err := m.IsNotAMountPoint(allocPath) + if err != nil { + return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) + } + + return allocPath, !isNotMount, nil +} + +func volumeCapability(vol *structs.CSIVolume, usage *UsageOptions) (*csi.VolumeCapability, error) { + capability, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) + if err != nil { + return nil, err + } + + var opts *structs.CSIMountOptions + if vol.MountOptions == nil { + opts = usage.MountOptions + } else { + opts = vol.MountOptions.Copy() + opts.Merge(usage.MountOptions) + } + + capability.MountVolume = opts + + return capability, nil +} + +// stageVolume prepares a volume for use by allocations. When a plugin exposes +// the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a +// given usage mode before the volume can be NodePublish-ed. +func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions, publishContext map[string]string) error { + logger := hclog.FromContext(ctx) + logger.Trace("Preparing volume staging environment") + hostStagingPath, isMount, err := v.ensureStagingDir(vol, usage) + if err != nil { + return err + } + pluginStagingPath := v.stagingDirForVolume(v.containerMountPoint, vol, usage) + + logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "host_staging_path", hostStagingPath, "plugin_staging_path", pluginStagingPath) + + if isMount { + logger.Debug("re-using existing staging mount for volume", "staging_path", hostStagingPath) + return nil + } + + capability, err := volumeCapability(vol, usage) + if err != nil { + return err + } + + // We currently treat all explicit CSI NodeStageVolume errors (aside from timeouts, codes.ResourceExhausted, and codes.Unavailable) + // as fatal. + // In the future, we can provide more useful error messages based on + // different types of error. 
For error documentation see: + // https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume-errors + return v.plugin.NodeStageVolume(ctx, + vol.ID, + publishContext, + pluginStagingPath, + capability, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) +} + +func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) { + logger := hclog.FromContext(ctx) + var pluginStagingPath string + if v.requiresStaging { + pluginStagingPath = v.stagingDirForVolume(v.containerMountPoint, vol, usage) + } + + hostTargetPath, isMount, err := v.ensureAllocDir(vol, alloc, usage) + if err != nil { + return nil, err + } + pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc, usage) + + if isMount { + logger.Debug("Re-using existing published volume for allocation") + return &MountInfo{Source: hostTargetPath}, nil + } + + capabilities, err := volumeCapability(vol, usage) + if err != nil { + return nil, err + } + + err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ + VolumeID: vol.RemoteID(), + PublishContext: publishContext, + StagingTargetPath: pluginStagingPath, + TargetPath: pluginTargetPath, + VolumeCapability: capabilities, + Readonly: usage.ReadOnly, + }, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) + + return &MountInfo{Source: hostTargetPath}, err +} + +// MountVolume performs the steps required for using a given volume +// configuration for the provided allocation. +// It is passed the publishContext from remote attachment, and specific usage +// modes from the CSI Hook. +// It then uses this state to stage and publish the volume as required for use +// by the given allocation. +func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) { + logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) + ctx = hclog.WithContext(ctx, logger) + + if v.requiresStaging { + if err := v.stageVolume(ctx, vol, usage, publishContext); err != nil { + return nil, err + } + } + + mountInfo, err := v.publishVolume(ctx, vol, alloc, usage, publishContext) + if err != nil { + return nil, err + } + + v.usageTracker.Claim(alloc, vol, usage) + + return mountInfo, nil +} + +// unstageVolume is the inverse operation of `stageVolume` and must be called +// once for each staging path that a volume has been staged under. +// It is safe to call multiple times and a plugin is required to return OK if +// the volume has been unstaged or was never staged on the node. 
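+//
+// Note that staging paths are per-usage mode (see stagingDirForVolume), so a
+// volume staged for both read-only and read-write use is unstaged once per
+// UsageOptions value, not once per volume.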
+func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions) error {
+	logger := hclog.FromContext(ctx)
+	logger.Trace("Unstaging volume")
+	stagingPath := v.stagingDirForVolume(v.containerMountPoint, vol, usage)
+	return v.plugin.NodeUnstageVolume(ctx,
+		vol.ID,
+		stagingPath,
+		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
+		grpc_retry.WithMax(3),
+		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
+	)
+}
+
+func combineErrors(maybeErrs ...error) error {
+	var result *multierror.Error
+	for _, err := range maybeErrs {
+		if err == nil {
+			continue
+		}
+
+		result = multierror.Append(result, err)
+	}
+
+	return result.ErrorOrNil()
+}
+
+func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) error {
+	pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc, usage)
+
+	rpcErr := v.plugin.NodeUnpublishVolume(ctx, vol.ID, pluginTargetPath,
+		grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout),
+		grpc_retry.WithMax(3),
+		grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)),
+	)
+
+	hostTargetPath := v.allocDirForVolume(v.mountRoot, vol, alloc, usage)
+	if _, err := os.Stat(hostTargetPath); os.IsNotExist(err) {
+		// The host target path was already removed, so just return any rpcErr
+		return rpcErr
+	}
+
+	// The host target path was not cleaned up, so attempt to do so here. If
+	// it's still a mount then removing the dir will fail and we'll return both
+	// any rpcErr and the file error.
+	rmErr := os.Remove(hostTargetPath)
+	if rmErr != nil {
+		return combineErrors(rpcErr, rmErr)
+	}
+
+	// We successfully removed the directory, so return any rpcErr that was
+	// encountered; because we got here, it was probably transient or the mount
+	// was cleaned up externally. We might want to just return `nil` here in
+	// the future.
+ return rpcErr +} + +func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) error { + logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) + ctx = hclog.WithContext(ctx, logger) + + err := v.unpublishVolume(ctx, vol, alloc, usage) + if err != nil { + return err + } + + canRelease := v.usageTracker.Free(alloc, vol, usage) + if !v.requiresStaging || !canRelease { + return nil + } + + return v.unstageVolume(ctx, vol, usage) +} diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go new file mode 100644 index 000000000..93aa2447a --- /dev/null +++ b/client/pluginmanager/csimanager/volume_test.go @@ -0,0 +1,424 @@ +package csimanager + +import ( + "context" + "errors" + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + csifake "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" +) + +func tmpDir(t testing.TB) string { + t.Helper() + dir, err := ioutil.TempDir("", "nomad") + require.NoError(t, err) + return dir +} + +func TestVolumeManager_ensureStagingDir(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + Volume *structs.CSIVolume + UsageOptions *UsageOptions + CreateDirAheadOfTime bool + MountDirAheadOfTime bool + + ExpectedErr error + ExpectedMountState bool + }{ + { + Name: "Creates a directory when one does not exist", + Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, + }, + { + Name: "Does not fail because of a pre-existing directory", + Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, + CreateDirAheadOfTime: true, + }, + { + Name: "Returns negative mount info", + UsageOptions: &UsageOptions{}, + Volume: &structs.CSIVolume{ID: "foo"}, + }, + { + Name: "Returns positive mount info", + Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, + CreateDirAheadOfTime: true, + MountDirAheadOfTime: true, + ExpectedMountState: true, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + // Step 1: Validate that the test case makes sense + if !tc.CreateDirAheadOfTime && tc.MountDirAheadOfTime { + require.Fail(t, "Cannot Mount without creating a dir") + } + + if tc.MountDirAheadOfTime { + // We can enable these tests by either mounting a fake device on linux + // e.g shipping a small ext4 image file and using that as a loopback + // device, but there's no convenient way to implement this. + t.Skip("TODO: Skipped because we don't detect bind mounts") + } + + // Step 2: Test Setup + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume, tc.UsageOptions) + + if tc.CreateDirAheadOfTime { + err := os.MkdirAll(expectedStagingPath, 0700) + require.NoError(t, err) + } + + // Step 3: Now we can do some testing + + path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume, tc.UsageOptions) + if tc.ExpectedErr != nil { + require.EqualError(t, testErr, tc.ExpectedErr.Error()) + return // We don't perform extra validation if an error was detected. 
+ } + + require.NoError(t, testErr) + require.Equal(t, tc.ExpectedMountState, detectedMount) + + // If the ensureStagingDir call had to create a directory itself, then here + // we validate that the directory exists and its permissions + if !tc.CreateDirAheadOfTime { + file, err := os.Lstat(path) + require.NoError(t, err) + require.True(t, file.IsDir()) + + // TODO: Figure out a windows equivalent of this test + if runtime.GOOS != "windows" { + require.Equal(t, os.FileMode(0700), file.Mode().Perm()) + } + } + }) + } +} + +func TestVolumeManager_stageVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error + }{ + { + Name: "Returns an error when an invalid AttachmentMode is provided", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: "nonsense", + }, + UsageOptions: &UsageOptions{}, + ExpectedErr: errors.New("Unknown volume attachment mode: nonsense"), + }, + { + Name: "Returns an error when an invalid AccessMode is provided", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: "nonsense", + }, + UsageOptions: &UsageOptions{}, + ExpectedErr: errors.New("Unknown volume access mode: nonsense"), + }, + { + Name: "Returns an error when the plugin returns an error", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + UsageOptions: &UsageOptions{}, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + }, + { + Name: "Happy Path", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodeStageVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + ctx := context.Background() + + err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions, nil) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestVolumeManager_unstageVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + }{ + { + Name: "Returns an error when the plugin returns an error", + Volume: &structs.CSIVolume{ + ID: "foo", + }, + UsageOptions: &UsageOptions{}, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + ExpectedCSICallCount: 1, + }, + { + Name: "Happy Path", + Volume: &structs.CSIVolume{ + ID: "foo", + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodeUnstageVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + ctx := context.Background() + + err := manager.unstageVolume(ctx, tc.Volume, 
tc.UsageOptions) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnstageVolumeCallCount) + }) + } +} + +func TestVolumeManager_publishVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Allocation *structs.Allocation + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + ExpectedVolumeCapability *csi.VolumeCapability + }{ + { + Name: "Returns an error when the plugin returns an error", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + UsageOptions: &UsageOptions{}, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + ExpectedCSICallCount: 1, + }, + { + Name: "Happy Path", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + }, + { + Name: "Mount options in the volume", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + ExpectedVolumeCapability: &csi.VolumeCapability{ + AccessType: csi.VolumeAccessTypeMount, + AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter, + MountVolume: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + }, + { + Name: "Mount options override in the request", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + UsageOptions: &UsageOptions{ + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"rw"}, + }, + }, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + ExpectedVolumeCapability: &csi.VolumeCapability{ + AccessType: csi.VolumeAccessTypeMount, + AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter, + MountVolume: &structs.CSIMountOptions{ + MountFlags: []string{"rw"}, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodePublishVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + ctx := context.Background() + + _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions, nil) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount) + + if tc.ExpectedVolumeCapability != nil { + require.Equal(t, tc.ExpectedVolumeCapability, csiFake.PrevVolumeCapability) + } + + }) + } +} + +func TestVolumeManager_unpublishVolume(t *testing.T) { + 
t.Parallel() + cases := []struct { + Name string + Allocation *structs.Allocation + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + }{ + { + Name: "Returns an error when the plugin returns an error", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + }, + UsageOptions: &UsageOptions{}, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + ExpectedCSICallCount: 1, + }, + { + Name: "Happy Path", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodeUnpublishVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + ctx := context.Background() + + err := manager.unpublishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnpublishVolumeCallCount) + }) + } +} diff --git a/client/pluginmanager/drivermanager/state/state.go b/client/pluginmanager/drivermanager/state/state.go index 529499cff..f37717c3b 100644 --- a/client/pluginmanager/drivermanager/state/state.go +++ b/client/pluginmanager/drivermanager/state/state.go @@ -2,10 +2,10 @@ package state import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" -// PluginState is used to store the driver managers state across restarts of the +// PluginState is used to store the driver manager's state across restarts of the // agent type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugin's launched by + // ReattachConfigs are the set of reattach configs for plugins launched by // the driver manager ReattachConfigs map[string]*pstructs.ReattachConfig } diff --git a/client/rpc.go b/client/rpc.go index b502587cf..04ee73c44 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -20,10 +20,11 @@ import ( // rpcEndpoints holds the RPC endpoints type rpcEndpoints struct { - ClientStats *ClientStats - FileSystem *FileSystem - Allocations *Allocations - Agent *Agent + ClientStats *ClientStats + CSIController *CSIController + FileSystem *FileSystem + Allocations *Allocations + Agent *Agent } // ClientRPC is used to make a local, client only RPC call @@ -217,6 +218,7 @@ func (c *Client) streamingRpcConn(server *servers.Server, method string) (net.Co func (c *Client) setupClientRpc() { // Initialize the RPC handlers c.endpoints.ClientStats = &ClientStats{c} + c.endpoints.CSIController = &CSIController{c} c.endpoints.FileSystem = NewFileSystemEndpoint(c) c.endpoints.Allocations = NewAllocationsEndpoint(c) c.endpoints.Agent = NewAgentEndpoint(c) @@ -234,6 +236,7 @@ func (c *Client) setupClientRpc() { func (c *Client) setupClientRpcServer(server *rpc.Server) { // Register the endpoints server.Register(c.endpoints.ClientStats) + server.Register(c.endpoints.CSIController) server.Register(c.endpoints.FileSystem) server.Register(c.endpoints.Allocations) server.Register(c.endpoints.Agent) diff --git a/client/state/db_test.go b/client/state/db_test.go index c37ed8bd2..bb63507a2 100644 --- a/client/state/db_test.go +++ b/client/state/db_test.go @@ 
-8,6 +8,7 @@ import ( trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -238,6 +239,31 @@ func TestStateDB_DriverManager(t *testing.T) { }) } +// TestStateDB_DynamicRegistry asserts the behavior of the dynamic-registry-state-related StateDB +// methods. +func TestStateDB_DynamicRegistry(t *testing.T) { + t.Parallel() + + testDB(t, func(t *testing.T, db StateDB) { + require := require.New(t) + + // Getting nonexistent state should return nils + ps, err := db.GetDynamicPluginRegistryState() + require.NoError(err) + require.Nil(ps) + + // Putting RegistryState should work + state := &dynamicplugins.RegistryState{} + require.NoError(db.PutDynamicPluginRegistryState(state)) + + // Getting should return the available state + ps, err = db.GetDynamicPluginRegistryState() + require.NoError(err) + require.NotNil(ps) + require.Equal(state, ps) + }) +} + // TestStateDB_Upgrade asserts calling Upgrade on new databases always // succeeds. func TestStateDB_Upgrade(t *testing.T) { diff --git a/client/state/interface.go b/client/state/interface.go index 2624b46ea..dc492d5ec 100644 --- a/client/state/interface.go +++ b/client/state/interface.go @@ -3,6 +3,7 @@ package state import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -69,6 +70,12 @@ type StateDB interface { // state. PutDriverPluginState(state *driverstate.PluginState) error + // GetDynamicPluginRegistryState is used to retrieve a dynamic plugin manager's state. + GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) + + // PutDynamicPluginRegistryState is used to store the dynamic plugin manager's state. + PutDynamicPluginRegistryState(state *dynamicplugins.RegistryState) error + + // Close the database. Unsafe for further use after calling regardless // of return value.
Close() error diff --git a/client/state/memdb.go b/client/state/memdb.go index 5d64870e1..63e967e45 100644 --- a/client/state/memdb.go +++ b/client/state/memdb.go @@ -6,6 +6,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -29,6 +30,9 @@ type MemDB struct { // drivermanager -> plugin-state driverManagerPs *driverstate.PluginState + // dynamicmanager -> registry-state + dynamicManagerPs *dynamicplugins.RegistryState + logger hclog.Logger mu sync.RWMutex @@ -193,6 +197,19 @@ func (m *MemDB) PutDriverPluginState(ps *driverstate.PluginState) error { return nil } +func (m *MemDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + m.mu.Lock() + defer m.mu.Unlock() + return m.dynamicManagerPs, nil +} + +func (m *MemDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + m.mu.Lock() + defer m.mu.Unlock() + m.dynamicManagerPs = ps + return nil +} + func (m *MemDB) Close() error { m.mu.Lock() defer m.mu.Unlock() diff --git a/client/state/noopdb.go b/client/state/noopdb.go index 53364ecba..28fbd2c15 100644 --- a/client/state/noopdb.go +++ b/client/state/noopdb.go @@ -3,6 +3,7 @@ package state import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -70,6 +71,14 @@ func (n NoopDB) GetDriverPluginState() (*driverstate.PluginState, error) { return nil, nil } +func (n NoopDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + return nil +} + +func (n NoopDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + return nil, nil +} + func (n NoopDB) Close() error { return nil } diff --git a/client/state/state_database.go b/client/state/state_database.go index 6d1e65fb2..a9a958f5f 100644 --- a/client/state/state_database.go +++ b/client/state/state_database.go @@ -11,6 +11,7 @@ import ( hclog "github.com/hashicorp/go-hclog" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/helper/boltdd" "github.com/hashicorp/nomad/nomad/structs" @@ -34,7 +35,10 @@ devicemanager/ |--> plugin_state -> *dmstate.PluginState drivermanager/ -|--> plugin_state -> *dmstate.PluginState +|--> plugin_state -> *driverstate.PluginState + +dynamicplugins/ +|--> registry_state -> *dynamicplugins.RegistryState */ var ( @@ -73,13 +77,20 @@ var ( // data devManagerBucket = []byte("devicemanager") - // driverManagerBucket is the bucket name container all driver manager + // driverManagerBucket is the bucket name containing all driver manager // related data driverManagerBucket = []byte("drivermanager") // managerPluginStateKey is the key by which plugin manager plugin state is // stored at managerPluginStateKey = []byte("plugin_state") + + // dynamicPluginBucket is the bucket name containing all dynamic plugin + // registry data. 
Each dynamic plugin registry will have its own subbucket. + dynamicPluginBucket = []byte("dynamicplugins") + + // registryStateKey is the key at which dynamic plugin registry state is stored + registryStateKey = []byte("registry_state") ) // taskBucketName returns the bucket name for the given task name. @@ -598,6 +609,52 @@ func (s *BoltStateDB) GetDriverPluginState() (*driverstate.PluginState, error) { return ps, nil } +// PutDynamicPluginRegistryState stores the dynamic plugin registry's +// state or returns an error. +func (s *BoltStateDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + return s.db.Update(func(tx *boltdd.Tx) error { + // Retrieve the root dynamic plugin manager bucket + dynamicBkt, err := tx.CreateBucketIfNotExists(dynamicPluginBucket) + if err != nil { + return err + } + return dynamicBkt.Put(registryStateKey, ps) + }) +} + +// GetDynamicPluginRegistryState retrieves the dynamic plugin registry's +// state or returns an error. +func (s *BoltStateDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + var ps *dynamicplugins.RegistryState + + err := s.db.View(func(tx *boltdd.Tx) error { + dynamicBkt := tx.Bucket(dynamicPluginBucket) + if dynamicBkt == nil { + // No state, return + return nil + } + + // Restore the registry state if it exists + ps = &dynamicplugins.RegistryState{} + if err := dynamicBkt.Get(registryStateKey, ps); err != nil { + if !boltdd.IsErrNotFound(err) { + return fmt.Errorf("failed to read dynamic plugin registry state: %v", err) + } + + // Key not found, reset ps to nil + ps = nil + } + + return nil + }) + + if err != nil { + return nil, err + } + + return ps, nil +} + // init initializes metadata entries in a newly created state database. func (s *BoltStateDB) init() error { return s.db.Update(func(tx *boltdd.Tx) error { diff --git a/client/structs/allochook.go b/client/structs/allochook.go new file mode 100644 index 000000000..59c56c0f7 --- /dev/null +++ b/client/structs/allochook.go @@ -0,0 +1,29 @@ +package structs + +import ( + "sync" + + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" +) + +// AllocHookResources contains data that is provided by AllocRunner Hooks for +// consumption by TaskRunners +type AllocHookResources struct { + CSIMounts map[string]*csimanager.MountInfo + + mu sync.RWMutex +} + +func (a *AllocHookResources) GetCSIMounts() map[string]*csimanager.MountInfo { + a.mu.RLock() + defer a.mu.RUnlock() + + return a.CSIMounts +} + +func (a *AllocHookResources) SetCSIMounts(m map[string]*csimanager.MountInfo) { + a.mu.Lock() + defer a.mu.Unlock() + + a.CSIMounts = m +} diff --git a/client/structs/csi.go b/client/structs/csi.go new file mode 100644 index 000000000..1a5b54286 --- /dev/null +++ b/client/structs/csi.go @@ -0,0 +1,131 @@ +package structs + +import ( + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +// CSIVolumeMountOptions contains the mount options that should be provided when +// attaching and mounting a volume with the CSIVolumeAttachmentModeFilesystem +// attachment mode. +type CSIVolumeMountOptions struct { + // Filesystem is the desired filesystem type that should be used by the volume + // (e.g. ext4, aufs, zfs). This field is optional. + Filesystem string + + // MountFlags contain the mount options that should be used for the volume. + // These may contain _sensitive_ data and should not be leaked to logs or + // returned in debugging data. + // The total size of this field must be under 4KiB.
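+ // For example, a read-only mount might carry MountFlags of []string{"ro"} + // (an illustrative value, not a requirement of this change).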
+ MountFlags []string +} + +// CSIControllerQuery is used to specify various flags for queries against CSI +// Controllers +type CSIControllerQuery struct { + // ControllerNodeID is the node that should be targeted by the request + ControllerNodeID string + + // PluginID is the plugin that should be targeted on the given node. + PluginID string +} + +type ClientCSIControllerValidateVolumeRequest struct { + VolumeID string + + AttachmentMode structs.CSIVolumeAttachmentMode + AccessMode structs.CSIVolumeAccessMode + + CSIControllerQuery +} + +type ClientCSIControllerValidateVolumeResponse struct { +} + +type ClientCSIControllerAttachVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeID string + + // The ID of the node. This field is REQUIRED. This must match the NodeID that + // is fingerprinted by the target node for this plugin name. + ClientCSINodeID string + + // AttachmentMode indicates how the volume should be attached and mounted into + // a task. + AttachmentMode structs.CSIVolumeAttachmentMode + + // AccessMode indicates the desired concurrent access model for the volume + AccessMode structs.CSIVolumeAccessMode + + // MountOptions is an optional field that contains additional configuration + // when providing an AttachmentMode of CSIVolumeAttachmentModeFilesystem + MountOptions *CSIVolumeMountOptions + + // ReadOnly indicates that the volume will be used in a readonly fashion. This + // only works when the Controller has the PublishReadonly capability. + ReadOnly bool + + CSIControllerQuery +} + +func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() (*csi.ControllerPublishVolumeRequest, error) { + if c == nil { + return &csi.ControllerPublishVolumeRequest{}, nil + } + + caps, err := csi.VolumeCapabilityFromStructs(c.AttachmentMode, c.AccessMode) + if err != nil { + return nil, err + } + + return &csi.ControllerPublishVolumeRequest{ + VolumeID: c.VolumeID, + NodeID: c.ClientCSINodeID, + ReadOnly: c.ReadOnly, + VolumeCapability: caps, + }, nil +} + +type ClientCSIControllerAttachVolumeResponse struct { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls have contextual information. + // The contents of this field SHALL be opaque to Nomad. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for Nomad to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + PublishContext map[string]string +} + +type ClientCSIControllerDetachVolumeRequest struct { + // The ID of the volume to be unpublished for the node + // This field is REQUIRED. + VolumeID string + + // The CSI Node ID for the Node that the volume should be detached from. + // This field is REQUIRED. This must match the NodeID that is fingerprinted + // by the target node for this plugin name.
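+ // (That is, the storage provider's ID for the node as fingerprinted from + // the plugin, not the Nomad node's own ID.)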
+ ClientCSINodeID string + + CSIControllerQuery +} + +func (c *ClientCSIControllerDetachVolumeRequest) ToCSIRequest() *csi.ControllerUnpublishVolumeRequest { + if c == nil { + return &csi.ControllerUnpublishVolumeRequest{} + } + + return &csi.ControllerUnpublishVolumeRequest{ + VolumeID: c.VolumeID, + NodeID: c.ClientCSINodeID, + } +} + +type ClientCSIControllerDetachVolumeResponse struct{} diff --git a/command/agent/config_parse.go b/command/agent/config_parse.go index cde091bb2..a4a42f0b7 100644 --- a/command/agent/config_parse.go +++ b/command/agent/config_parse.go @@ -6,11 +6,10 @@ import ( "io" "os" "path/filepath" - "reflect" - "strings" "time" "github.com/hashicorp/hcl" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs/config" ) @@ -110,49 +109,33 @@ func durations(xs []td) error { return nil } -// removeEqualFold removes the first string that EqualFold matches -func removeEqualFold(xs *[]string, search string) { - sl := *xs - for i, x := range sl { - if strings.EqualFold(x, search) { - sl = append(sl[:i], sl[i+1:]...) - if len(sl) == 0 { - *xs = nil - } else { - *xs = sl - } - return - } - } -} - func extraKeys(c *Config) error { // hcl leaves behind extra keys when parsing JSON. These keys // are kept on the top level, taken from slices or the keys of // structs contained in slices. Clean up before looking for // extra keys. for range c.HTTPAPIResponseHeaders { - removeEqualFold(&c.ExtraKeysHCL, "http_api_response_headers") + helper.RemoveEqualFold(&c.ExtraKeysHCL, "http_api_response_headers") } for _, p := range c.Plugins { - removeEqualFold(&c.ExtraKeysHCL, p.Name) - removeEqualFold(&c.ExtraKeysHCL, "config") - removeEqualFold(&c.ExtraKeysHCL, "plugin") + helper.RemoveEqualFold(&c.ExtraKeysHCL, p.Name) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "config") + helper.RemoveEqualFold(&c.ExtraKeysHCL, "plugin") } for _, k := range []string{"options", "meta", "chroot_env", "servers", "server_join"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "client") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "client") } // stats is an unused key, continue to silently ignore it - removeEqualFold(&c.Client.ExtraKeysHCL, "stats") + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, "stats") // Remove HostVolume extra keys for _, hv := range c.Client.HostVolumes { - removeEqualFold(&c.Client.ExtraKeysHCL, hv.Name) - removeEqualFold(&c.Client.ExtraKeysHCL, "host_volume") + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, hv.Name) + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, "host_volume") } // Remove AuditConfig extra keys @@ -167,60 +150,14 @@ func extraKeys(c *Config) error { } for _, k := range []string{"enabled_schedulers", "start_join", "retry_join", "server_join"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "server") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "server") } for _, k := range []string{"datadog_tags"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "telemetry") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "telemetry") } - return extraKeysImpl([]string{}, reflect.ValueOf(*c)) -} - -// extraKeysImpl returns an error if any extraKeys array is not empty -func extraKeysImpl(path []string, val reflect.Value) error { - stype := val.Type() - for i := 0; i < stype.NumField(); i++ { - ftype := stype.Field(i) - fval := val.Field(i) - - name := ftype.Name - 
prop := "" - tagSplit(ftype, "hcl", &name, &prop) - - if fval.Kind() == reflect.Ptr { - fval = reflect.Indirect(fval) - } - - // struct? recurse. add the struct's key to the path - if fval.Kind() == reflect.Struct { - err := extraKeysImpl(append([]string{name}, path...), fval) - if err != nil { - return err - } - } - - if "unusedKeys" == prop { - if ks, ok := fval.Interface().([]string); ok && len(ks) != 0 { - return fmt.Errorf("%s unexpected keys %s", - strings.Join(path, "."), - strings.Join(ks, ", ")) - } - } - } - return nil -} - -// tagSplit reads the named tag from the structfield and splits its values into strings -func tagSplit(field reflect.StructField, tagName string, vars ...*string) { - tag := strings.Split(field.Tag.Get(tagName), ",") - end := len(tag) - 1 - for i, s := range vars { - if i > end { - return - } - *s = tag[i] - } + return helper.UnusedKeys(c) } diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go new file mode 100644 index 000000000..28db3cc8b --- /dev/null +++ b/command/agent/csi_endpoint.go @@ -0,0 +1,200 @@ +package agent + +import ( + "net/http" + "strings" + + "github.com/hashicorp/nomad/nomad/structs" +) + +const errRequiresType = "Missing required parameter type" + +func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + // Type filters volume lists to a specific type. When support for non-CSI volumes is + // introduced, we'll need to dispatch here + query := req.URL.Query() + qtype, ok := query["type"] + if !ok { + return nil, CodedError(400, errRequiresType) + } + if qtype[0] != "csi" { + return nil, nil + } + + args := structs.CSIVolumeListRequest{} + + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + if plugin, ok := query["plugin_id"]; ok { + args.PluginID = plugin[0] + } + if node, ok := query["node_id"]; ok { + args.NodeID = node[0] + } + + var out structs.CSIVolumeListResponse + if err := s.agent.RPC("CSIVolume.List", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + return out.Volumes, nil +} + +// CSIVolumeSpecificRequest dispatches GET and PUT +func (s *HTTPServer) CSIVolumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Tokenize the suffix of the path to get the volume id + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/volume/csi/") + tokens := strings.Split(reqSuffix, "/") + if len(tokens) > 2 || len(tokens) < 1 { + return nil, CodedError(404, resourceNotFoundErr) + } + id := tokens[0] + + switch req.Method { + case "GET": + return s.csiVolumeGet(id, resp, req) + case "PUT": + return s.csiVolumePut(id, resp, req) + case "DELETE": + return s.csiVolumeDelete(id, resp, req) + default: + return nil, CodedError(405, ErrInvalidMethod) + } +} + +func (s *HTTPServer) csiVolumeGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := structs.CSIVolumeGetRequest{ + ID: id, + } + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIVolumeGetResponse + if err := s.agent.RPC("CSIVolume.Get", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.Volume == nil { + return nil, CodedError(404, "volume not found") + } + + return out.Volume, nil +} + +func (s *HTTPServer) csiVolumePut(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if 
req.Method != "PUT" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args0 := structs.CSIVolumeRegisterRequest{} + if err := decodeBody(req, &args0); err != nil { + return err, CodedError(400, err.Error()) + } + + args := structs.CSIVolumeRegisterRequest{ + Volumes: args0.Volumes, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.CSIVolumeRegisterResponse + if err := s.agent.RPC("CSIVolume.Register", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + + return nil, nil +} + +func (s *HTTPServer) csiVolumeDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "DELETE" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args := structs.CSIVolumeDeregisterRequest{ + VolumeIDs: []string{id}, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.CSIVolumeDeregisterResponse + if err := s.agent.RPC("CSIVolume.Deregister", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + + return nil, nil +} + +// CSIPluginsRequest lists CSI plugins +func (s *HTTPServer) CSIPluginsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + // Type filters plugin lists to a specific type. When support for non-CSI plugins is + // introduced, we'll need to dispatch here + query := req.URL.Query() + qtype, ok := query["type"] + if !ok { + return nil, CodedError(400, errRequiresType) + } + if qtype[0] != "csi" { + return nil, nil + } + + args := structs.CSIPluginListRequest{} + + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIPluginListResponse + if err := s.agent.RPC("CSIPlugin.List", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + return out.Plugins, nil +} + +// CSIPluginSpecificRequest list the job with CSIInfo +func (s *HTTPServer) CSIPluginSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + // Tokenize the suffix of the path to get the plugin id + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/plugin/csi/") + tokens := strings.Split(reqSuffix, "/") + if len(tokens) > 2 || len(tokens) < 1 { + return nil, CodedError(404, resourceNotFoundErr) + } + id := tokens[0] + + args := structs.CSIPluginGetRequest{ID: id} + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIPluginGetResponse + if err := s.agent.RPC("CSIPlugin.Get", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.Plugin == nil { + return nil, CodedError(404, "plugin not found") + } + + return out.Plugin, nil +} diff --git a/command/agent/http.go b/command/agent/http.go index 5821978d3..68cf7b46a 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -263,6 +263,11 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/deployments", s.wrap(s.DeploymentsRequest)) s.mux.HandleFunc("/v1/deployment/", s.wrap(s.DeploymentSpecificRequest)) + s.mux.HandleFunc("/v1/volumes", s.wrap(s.CSIVolumesRequest)) + s.mux.HandleFunc("/v1/volume/csi/", s.wrap(s.CSIVolumeSpecificRequest)) + s.mux.HandleFunc("/v1/plugins", s.wrap(s.CSIPluginsRequest)) + s.mux.HandleFunc("/v1/plugin/csi/", s.wrap(s.CSIPluginSpecificRequest)) + s.mux.HandleFunc("/v1/acl/policies", 
s.wrap(s.ACLPoliciesRequest)) s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest)) diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index b394ed357..b76329c56 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -749,8 +749,9 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { if l := len(taskGroup.Volumes); l != 0 { tg.Volumes = make(map[string]*structs.VolumeRequest, l) for k, v := range taskGroup.Volumes { - if v.Type != structs.VolumeTypeHost { - // Ignore non-host volumes in this iteration currently. + if v.Type != structs.VolumeTypeHost && v.Type != structs.VolumeTypeCSI { + // Ignore volume types we don't understand for now, since we + // don't currently have a way to return errors here. continue } @@ -761,6 +762,13 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { Source: v.Source, } + if v.MountOptions != nil { + vol.MountOptions = &structs.CSIMountOptions{ + FSType: v.MountOptions.FSType, + MountFlags: v.MountOptions.MountFlags, + } + } + tg.Volumes[k] = vol } } @@ -812,6 +820,7 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { structsTask.Kind = structs.TaskKind(apiTask.Kind) structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints) structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities) + structsTask.CSIPluginConfig = ApiCSIPluginConfigToStructsCSIPluginConfig(apiTask.CSIPluginConfig) if l := len(apiTask.VolumeMounts); l != 0 { structsTask.VolumeMounts = make([]*structs.VolumeMount, l) @@ -933,6 +942,18 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { } } +func ApiCSIPluginConfigToStructsCSIPluginConfig(apiConfig *api.TaskCSIPluginConfig) *structs.TaskCSIPluginConfig { + if apiConfig == nil { + return nil + } + + sc := &structs.TaskCSIPluginConfig{} + sc.ID = apiConfig.ID + sc.Type = structs.CSIPluginType(apiConfig.Type) + sc.MountDir = apiConfig.MountDir + return sc +} + func ApiResourcesToStructs(in *api.Resources) *structs.Resources { if in == nil { return nil diff --git a/command/alloc_status.go b/command/alloc_status.go index d9eb416aa..b9479aefd 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts" + "github.com/hashicorp/nomad/nomad/structs" "github.com/posener/complete" ) @@ -214,7 +215,7 @@ func (c *AllocStatusCommand) Run(args []string) int { c.Ui.Output("Omitting resource statistics since the node is down.") } } - c.outputTaskDetails(alloc, stats, displayStats) + c.outputTaskDetails(alloc, stats, displayStats, verbose) } // Format the detailed status @@ -362,12 +363,13 @@ func futureEvalTimePretty(evalID string, client *api.Client) string { // outputTaskDetails prints task details for each task in the allocation, // optionally printing verbose statistics if displayStats is set -func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool) { +func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool, verbose bool) { for task := range c.sortedTaskStateIterator(alloc.TaskStates) { state := alloc.TaskStates[task] c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q is %q[reset]", task, state.State))) c.outputTaskResources(alloc, task, stats,
displayStats) c.Ui.Output("") + c.outputTaskVolumes(alloc, task, verbose) c.outputTaskStatus(state) } } @@ -721,3 +723,80 @@ func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState close(output) return output } + +func (c *AllocStatusCommand) outputTaskVolumes(alloc *api.Allocation, taskName string, verbose bool) { + var task *api.Task + var tg *api.TaskGroup +FOUND: + for _, tg = range alloc.Job.TaskGroups { + for _, task = range tg.Tasks { + if task.Name == taskName { + break FOUND + } + } + } + if task == nil || tg == nil { + c.Ui.Error(fmt.Sprintf("Could not find task data for %q", taskName)) + return + } + if len(task.VolumeMounts) == 0 { + return + } + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return + } + + var hostVolumesOutput []string + var csiVolumesOutput []string + hostVolumesOutput = append(hostVolumesOutput, "ID|Read Only") + if verbose { + csiVolumesOutput = append(csiVolumesOutput, + "ID|Plugin|Provider|Schedulable|Read Only|Mount Options") + } else { + csiVolumesOutput = append(csiVolumesOutput, "ID|Read Only") + } + + for _, volMount := range task.VolumeMounts { + volReq := tg.Volumes[*volMount.Volume] + switch volReq.Type { + case structs.VolumeTypeHost: + hostVolumesOutput = append(hostVolumesOutput, + fmt.Sprintf("%s|%v", volReq.Name, *volMount.ReadOnly)) + case structs.VolumeTypeCSI: + if verbose { + // there's an extra API call per volume here, so we only + // make it when the -verbose flag is set + vol, _, err := client.CSIVolumes().Info(volReq.Name, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error retrieving volume info for %q: %s", + volReq.Name, err)) + continue + } + csiVolumesOutput = append(csiVolumesOutput, + fmt.Sprintf("%s|%s|%s|%v|%v|%s", + volReq.Name, + vol.PluginID, + vol.Provider, + vol.Schedulable, + volReq.ReadOnly, + csiVolMountOption(vol.MountOptions, volReq.MountOptions), + )) + } else { + csiVolumesOutput = append(csiVolumesOutput, + fmt.Sprintf("%s|%v", volReq.Name, volReq.ReadOnly)) + } + } + } + if len(hostVolumesOutput) > 1 { + c.Ui.Output("Host Volumes:") + c.Ui.Output(formatList(hostVolumesOutput)) + c.Ui.Output("") // line padding to next stanza + } + if len(csiVolumesOutput) > 1 { + c.Ui.Output("CSI Volumes:") + c.Ui.Output(formatList(csiVolumesOutput)) + c.Ui.Output("") // line padding to next stanza + } +} diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index 0c0bf38e9..6f5b35c51 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -2,11 +2,14 @@ package command import ( "fmt" + "io/ioutil" + "os" "regexp" "strings" "testing" "time" + "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -315,3 +318,146 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { assert.Equal(1, len(res)) assert.Equal(a.ID, res[0]) } + +func TestAllocStatusCommand_HostVolumes(t *testing.T) { + t.Parallel() + // We have to create a tempdir for the host volume even though we're + // not going to use it, because the server validates the config on startup + tmpDir, err := ioutil.TempDir("", "vol0") + if err != nil { + t.Fatalf("unable to create tempdir for test: %v", err) + } + defer os.RemoveAll(tmpDir) + + vol0 := uuid.Generate() + srv, _, url := testServer(t, true, func(c *agent.Config) { + c.Client.HostVolumes = []*structs.ClientHostVolumeConfig{ + { + Name: vol0, + Path: tmpDir, + ReadOnly:
false, + }, + } + }) + defer srv.Shutdown() + state := srv.Agent.Server().State() + + // Upsert the job and alloc + node := mock.Node() + alloc := mock.Alloc() + alloc.Metrics = &structs.AllocMetric{} + alloc.NodeID = node.ID + job := alloc.Job + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + vol0: { + Name: vol0, + Type: structs.VolumeTypeHost, + Source: tmpDir, + }, + } + job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ + { + Volume: vol0, + Destination: "/var/www", + ReadOnly: true, + PropagationMode: "private", + }, + } + // fake the placement just enough that 'nomad alloc status' has + // something to iterate over + alloc.TaskStates = map[string]*structs.TaskState{ + "web": &structs.TaskState{ + Events: []*structs.TaskEvent{ + structs.NewTaskEvent("test event").SetMessage("test msg"), + }, + }, + } + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + ui := new(cli.MockUi) + cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} + if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } + out := ui.OutputWriter.String() + require.Contains(t, out, "Host Volumes") + require.Contains(t, out, fmt.Sprintf("%s true", vol0)) + require.NotContains(t, out, "CSI Volumes") +} + +func TestAllocStatusCommand_CSIVolumes(t *testing.T) { + t.Parallel() + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + state := srv.Agent.Server().State() + + // Upsert the node, plugin, and volume + vol0 := uuid.Generate() + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + err := state.UpsertNode(1001, node) + require.NoError(t, err) + + vols := []*structs.CSIVolume{{ + ID: vol0, + Namespace: structs.DefaultNamespace, + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + err = state.CSIVolumeRegister(1002, vols) + require.NoError(t, err) + + // Upsert the job and alloc + alloc := mock.Alloc() + alloc.Metrics = &structs.AllocMetric{} + alloc.NodeID = node.ID + job := alloc.Job + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + vol0: { + Name: vol0, + Type: structs.VolumeTypeCSI, + Source: "/tmp/vol0", + }, + } + job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ + { + Volume: vol0, + Destination: "/var/www", + ReadOnly: true, + PropagationMode: "private", + }, + } + // if we don't set a task state, alloc status has nothing to iterate over + alloc.TaskStates = map[string]*structs.TaskState{ + "web": &structs.TaskState{ + Events: []*structs.TaskEvent{ + structs.NewTaskEvent("test event").SetMessage("test msg"), + }, + }, + } + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + ui := new(cli.MockUi) + cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} + if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } + out := ui.OutputWriter.String() + require.Contains(t, out, "CSI Volumes") + require.Contains(t, out, fmt.Sprintf("%s minnie",
vol0)) + require.NotContains(t, out, "Host Volumes") +} diff --git a/command/commands.go b/command/commands.go index ff2100a9d..dbbcec006 100644 --- a/command/commands.go +++ b/command/commands.go @@ -493,6 +493,17 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { }, nil }, + "plugin": func() (cli.Command, error) { + return &PluginCommand{ + Meta: meta, + }, nil + }, + "plugin status": func() (cli.Command, error) { + return &PluginStatusCommand{ + Meta: meta, + }, nil + }, + "quota": func() (cli.Command, error) { return &QuotaCommand{ Meta: meta, @@ -646,6 +657,26 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Ui: meta.Ui, }, nil }, + "volume": func() (cli.Command, error) { + return &VolumeCommand{ + Meta: meta, + }, nil + }, + "volume status": func() (cli.Command, error) { + return &VolumeStatusCommand{ + Meta: meta, + }, nil + }, + "volume register": func() (cli.Command, error) { + return &VolumeRegisterCommand{ + Meta: meta, + }, nil + }, + "volume deregister": func() (cli.Command, error) { + return &VolumeDeregisterCommand{ + Meta: meta, + }, nil + }, } deprecated := map[string]cli.CommandFactory{ diff --git a/command/node_status.go b/command/node_status.go index 4bbf443b9..2651c4375 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -299,6 +299,40 @@ func nodeDrivers(n *api.Node) []string { return drivers } +func nodeCSIControllerNames(n *api.Node) []string { + var names []string + for name := range n.CSIControllerPlugins { + names = append(names, name) + } + sort.Strings(names) + return names +} + +func nodeCSINodeNames(n *api.Node) []string { + var names []string + for name := range n.CSINodePlugins { + names = append(names, name) + } + sort.Strings(names) + return names +} + +func nodeCSIVolumeNames(n *api.Node, allocs []*api.Allocation) []string { + var names []string + for _, alloc := range allocs { + tg := alloc.GetTaskGroup() + if tg == nil || len(tg.Volumes) == 0 { + continue + } + + for _, v := range tg.Volumes { + names = append(names, v.Name) + } + } + sort.Strings(names) + return names +} + func nodeVolumeNames(n *api.Node) []string { var volumes []string for name := range n.HostVolumes { @@ -331,6 +365,20 @@ func formatDrain(n *api.Node) string { } func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { + // Make one API call for allocations + nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err)) + return 1 + } + + var runningAllocs []*api.Allocation + for _, alloc := range nodeAllocs { + if alloc.ClientStatus == "running" { + runningAllocs = append(runningAllocs, alloc) + } + } + // Format the header output basic := []string{ fmt.Sprintf("ID|%s", node.ID), @@ -340,15 +388,18 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { fmt.Sprintf("Drain|%v", formatDrain(node)), fmt.Sprintf("Eligibility|%s", node.SchedulingEligibility), fmt.Sprintf("Status|%s", node.Status), + fmt.Sprintf("CSI Controllers|%s", strings.Join(nodeCSIControllerNames(node), ",")), + fmt.Sprintf("CSI Drivers|%s", strings.Join(nodeCSINodeNames(node), ",")), } if c.short { basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ","))) + basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ","))) basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ","))) 
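// The short view folds the host volume, CSI volume, and driver summaries into the key/value block printed below, rather than emitting separate sections.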
c.Ui.Output(c.Colorize().Color(formatKV(basic))) // Output alloc info - if err := c.outputAllocInfo(client, node); err != nil { + if err := c.outputAllocInfo(node, nodeAllocs); err != nil { c.Ui.Error(fmt.Sprintf("%s", err)) return 1 } @@ -371,7 +422,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { // driver info in the basic output if !c.verbose { basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ","))) - + basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ","))) driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node)) basic = append(basic, driverStatus) } @@ -382,6 +433,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { // If we're running in verbose mode, include full host volume and driver info if c.verbose { c.outputNodeVolumeInfo(node) + c.outputNodeCSIVolumeInfo(client, node, runningAllocs) c.outputNodeDriverInfo(node) } @@ -389,12 +441,6 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { c.outputNodeStatusEvents(node) // Get list of running allocations on the node - runningAllocs, err := getRunningAllocs(client, node.ID) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying node for running allocations: %s", err)) - return 1 - } - allocatedResources := getAllocatedResources(client, runningAllocs, node) c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]")) c.Ui.Output(formatList(allocatedResources)) @@ -432,7 +478,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { } } - if err := c.outputAllocInfo(client, node); err != nil { + if err := c.outputAllocInfo(node, nodeAllocs); err != nil { c.Ui.Error(fmt.Sprintf("%s", err)) return 1 } @@ -440,12 +486,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { return 0 } -func (c *NodeStatusCommand) outputAllocInfo(client *api.Client, node *api.Node) error { - nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil) - if err != nil { - return fmt.Errorf("Error querying node allocations: %s", err) - } - +func (c *NodeStatusCommand) outputAllocInfo(node *api.Node, nodeAllocs []*api.Allocation) error { c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]")) c.Ui.Output(formatAllocList(nodeAllocs, c.verbose, c.length)) @@ -495,6 +536,58 @@ func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) { c.Ui.Output(formatList(output)) } +func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *api.Node, runningAllocs []*api.Allocation) { + c.Ui.Output(c.Colorize().Color("\n[bold]CSI Volumes")) + + // Like nodeCSIVolumeNames, collect and sort the volume names, but also + // index the volume requests by source so results can be mapped back to names + var names []string + requests := map[string]*api.VolumeRequest{} + for _, alloc := range runningAllocs { + tg := alloc.GetTaskGroup() + if tg == nil || len(tg.Volumes) == 0 { + continue + } + + for _, v := range tg.Volumes { + names = append(names, v.Name) + requests[v.Source] = v + } + } + if len(names) == 0 { + return + } + sort.Strings(names) + + // Fetch the volume objects with current status. + // Errors are ignored here; the worst case is that we omit those volumes. + volumes := map[string]*api.CSIVolumeListStub{} + vs, _ := client.Nodes().CSIVolumes(node.ID, nil) + for _, v := range vs { + n := requests[v.ID].Name + volumes[n] = v + } + + // Output the volumes in name order + output := make([]string, 0, len(names)+1) + output =
append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode|Mount Options") + for _, name := range names { + v := volumes[name] + r := requests[v.ID] + output = append(output, fmt.Sprintf( + "%s|%s|%s|%t|%s|%s|%s", + v.ID, + name, + v.PluginID, + v.Schedulable, + v.Provider, + v.AccessMode, + csiVolMountOption(v.MountOptions, r.MountOptions), + )) + } + + c.Ui.Output(formatList(output)) +} + func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) { c.Ui.Output(c.Colorize().Color("\n[bold]Drivers")) diff --git a/command/plugin.go b/command/plugin.go new file mode 100644 index 000000000..7128e7cbe --- /dev/null +++ b/command/plugin.go @@ -0,0 +1,26 @@ +package command + +import "github.com/mitchellh/cli" + +type PluginCommand struct { + Meta +} + +func (c *PluginCommand) Help() string { + helpText := ` +Usage nomad plugin status [options] [plugin] + + This command groups subcommands for interacting with plugins. +` + return helpText +} + +func (c *PluginCommand) Synopsis() string { + return "Inspect plugins" +} + +func (c *PluginCommand) Name() string { return "plugin" } + +func (c *PluginCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_status.go b/command/plugin_status.go new file mode 100644 index 000000000..95eeb5cf6 --- /dev/null +++ b/command/plugin_status.go @@ -0,0 +1,146 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type PluginStatusCommand struct { + Meta + length int + short bool + verbose bool + json bool + template string +} + +func (c *PluginStatusCommand) Help() string { + helpText := ` +Usage nomad plugin status [options] + + Display status information about a plugin. If no plugin id is given, + a list of all plugins will be displayed. + +General Options: + + ` + generalOptionsUsage() + ` + +Status Options: + + -type + List only plugins of type . + + -short + Display short output. + + -verbose + Display full information. + + -json + Output the allocation in its JSON format. + + -t + Format and display allocation using a Go template. 
+` + return helpText +} + +func (c *PluginStatusCommand) Synopsis() string { + return "Display status information about a plugin" +} + +// predictVolumeType is also used in volume_status +var predictVolumeType = complete.PredictFunc(func(a complete.Args) []string { + types := []string{"csi"} + for _, t := range types { + if strings.Contains(t, a.Last) { + return []string{t} + } + } + return nil +}) + +func (c *PluginStatusCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-type": predictVolumeType, + "-short": complete.PredictNothing, + "-verbose": complete.PredictNothing, + "-json": complete.PredictNothing, + "-t": complete.PredictAnything, + }) +} + +func (c *PluginStatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Plugins, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Plugins] + }) +} + +func (c *PluginStatusCommand) Name() string { return "plugin status" } + +func (c *PluginStatusCommand) Run(args []string) int { + var typeArg string + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&typeArg, "type", "", "") + flags.BoolVar(&c.short, "short", false, "") + flags.BoolVar(&c.verbose, "verbose", false, "") + flags.BoolVar(&c.json, "json", false, "") + flags.StringVar(&c.template, "t", "", "") + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + typeArg = strings.ToLower(typeArg) + + // Check that we either got no arguments or exactly one. 
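+ // With no argument we fall through to listing all plugins; with exactly one + // argument we show detailed status for that plugin.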
+ args = flags.Args() + if len(args) > 1 { + c.Ui.Error("This command takes either no arguments or one: <plugin>") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Truncate the id unless full length is requested + c.length = shortId + if c.verbose { + c.length = fullId + } + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + id := "" + if len(args) == 1 { + id = args[0] + } + + code := c.csiStatus(client, id) + if code != 0 { + return code + } + + // Extend this section with other plugin implementations + + return 0 +} diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go new file mode 100644 index 000000000..36f1284bd --- /dev/null +++ b/command/plugin_status_csi.go @@ -0,0 +1,113 @@ +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/nomad/api" +) + +func (c *PluginStatusCommand) csiBanner() { + if !(c.json || len(c.template) > 0) { + c.Ui.Output(c.Colorize().Color("[bold]Container Storage Interface[reset]")) + } +} + +func (c *PluginStatusCommand) csiStatus(client *api.Client, id string) int { + if id == "" { + c.csiBanner() + plugs, _, err := client.CSIPlugins().List(nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying CSI plugins: %s", err)) + return 1 + } + + if len(plugs) == 0 { + // Report this rather than printing an empty list + c.Ui.Error("No CSI plugins") + } else { + str, err := c.csiFormatPlugins(plugs) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) + return 1 + } + c.Ui.Output(str) + } + return 0 + } + + // Lookup matched a single plugin + plug, _, err := client.CSIPlugins().Info(id, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying plugin: %s", err)) + return 1 + } + + str, err := c.csiFormatPlugin(plug) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting plugin: %s", err)) + return 1 + } + + c.Ui.Output(str) + return 0 +} + +func (c *PluginStatusCommand) csiFormatPlugins(plugs []*api.CSIPluginListStub) (string, error) { + // Sort the output by plugin ID + sort.Slice(plugs, func(i, j int) bool { return plugs[i].ID < plugs[j].ID }) + + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, plugs) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + rows := make([]string, len(plugs)+1) + rows[0] = "ID|Provider|Controllers Healthy/Expected|Nodes Healthy/Expected" + for i, p := range plugs { + rows[i+1] = fmt.Sprintf("%s|%s|%d/%d|%d/%d", + limit(p.ID, c.length), + p.Provider, + p.ControllersHealthy, + p.ControllersExpected, + p.NodesHealthy, + p.NodesExpected, + ) + } + return formatList(rows), nil +} + +func (c *PluginStatusCommand) csiFormatPlugin(plug *api.CSIPlugin) (string, error) { + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, plug) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + output := []string{ + fmt.Sprintf("ID|%s", plug.ID), + fmt.Sprintf("Provider|%s", plug.Provider), + fmt.Sprintf("Version|%s", plug.Version), + fmt.Sprintf("Controllers Healthy|%d", plug.ControllersHealthy), + fmt.Sprintf("Controllers Expected|%d", len(plug.Controllers)), + fmt.Sprintf("Nodes Healthy|%d", plug.NodesHealthy), + fmt.Sprintf("Nodes Expected|%d", len(plug.Nodes)), + } + + // Exit early + if c.short { + return formatKV(output), nil + } + + // Format the allocs + banner := c.Colorize().Color("\n[bold]Allocations[reset]") + allocs :=
formatAllocListStubs(plug.Allocations, c.verbose, c.length) + full := []string{formatKV(output), banner, allocs} + return strings.Join(full, "\n"), nil +} diff --git a/command/plugin_status_test.go b/command/plugin_status_test.go new file mode 100644 index 000000000..cdf38e67a --- /dev/null +++ b/command/plugin_status_test.go @@ -0,0 +1,57 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/nomad" + "github.com/mitchellh/cli" + "github.com/posener/complete" + "github.com/stretchr/testify/require" +) + +func TestPluginStatusCommand_Implements(t *testing.T) { + t.Parallel() + var _ cli.Command = &PluginStatusCommand{} +} + +func TestPluginStatusCommand_Fails(t *testing.T) { + t.Parallel() + ui := new(cli.MockUi) + cmd := &PluginStatusCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + code := cmd.Run([]string{"some", "bad", "args"}) + require.Equal(t, 1, code) + + out := ui.ErrorWriter.String() + require.Contains(t, out, commandErrorText(cmd)) + ui.ErrorWriter.Reset() +} + +func TestPluginStatusCommand_AutocompleteArgs(t *testing.T) { + t.Parallel() + + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + + ui := new(cli.MockUi) + cmd := &PluginStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}} + + // Create a plugin + id := "long-plugin-id" + state := srv.Agent.Server().State() + cleanup := nomad.CreateTestCSIPlugin(state, id) + defer cleanup() + ws := memdb.NewWatchSet() + plug, err := state.CSIPluginByID(ws, id) + require.NoError(t, err) + + prefix := plug.ID[:len(plug.ID)-5] + args := complete.Args{Last: prefix} + predictor := cmd.AutocompleteArgs() + + res := predictor.Predict(args) + require.Equal(t, 1, len(res)) + require.Equal(t, plug.ID, res[0]) +} diff --git a/command/status.go b/command/status.go index 4cf028b39..c67a73f62 100644 --- a/command/status.go +++ b/command/status.go @@ -162,6 +162,10 @@ func (c *StatusCommand) Run(args []string) int { cmd = &NamespaceStatusCommand{Meta: c.Meta} case contexts.Quotas: cmd = &QuotaStatusCommand{Meta: c.Meta} + case contexts.Plugins: + cmd = &PluginStatusCommand{Meta: c.Meta} + case contexts.Volumes: + cmd = &VolumeStatusCommand{Meta: c.Meta} default: c.Ui.Error(fmt.Sprintf("Unable to resolve ID: %q", id)) return 1 diff --git a/command/volume.go b/command/volume.go new file mode 100644 index 000000000..83eb44093 --- /dev/null +++ b/command/volume.go @@ -0,0 +1,46 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +type VolumeCommand struct { + Meta +} + +func (c *VolumeCommand) Help() string { + helpText := ` +Usage: nomad volume [options] + + volume groups commands that interact with volumes. + + Register a new volume or update an existing volume: + + $ nomad volume register <input> + + Examine the status of a volume: + + $ nomad volume status <id> + + Deregister an unused volume: + + $ nomad volume deregister <id> + + Please see the individual subcommand help for detailed usage information.
+` + return strings.TrimSpace(helpText) +} + +func (c *VolumeCommand) Name() string { + return "volume" +} + +func (c *VolumeCommand) Synopsis() string { + return "Interact with volumes" +} + +func (c *VolumeCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/volume_deregister.go b/command/volume_deregister.go new file mode 100644 index 000000000..eafb14ea6 --- /dev/null +++ b/command/volume_deregister.go @@ -0,0 +1,88 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type VolumeDeregisterCommand struct { + Meta +} + +func (c *VolumeDeregisterCommand) Help() string { + helpText := ` +Usage: nomad volume deregister [options] <id> + + Remove an unused volume from Nomad. + +General Options: + + ` + generalOptionsUsage() + + return strings.TrimSpace(helpText) +} + +func (c *VolumeDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Meta.AutocompleteFlags(FlagSetClient) +} + +func (c *VolumeDeregisterCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + // When multiple volume types are implemented, this search should merge contexts + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Volumes, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Volumes] + }) +} + +func (c *VolumeDeregisterCommand) Synopsis() string { + return "Remove a volume" +} + +func (c *VolumeDeregisterCommand) Name() string { return "volume deregister" } + +func (c *VolumeDeregisterCommand) Run(args []string) int { + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + // Check that we get exactly one argument + args = flags.Args() + if l := len(args); l != 1 { + c.Ui.Error("This command takes one argument: <id>") + c.Ui.Error(commandErrorText(c)) + return 1 + } + volID := args[0] + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + // Deregister only works on CSI volumes today, but could be extended to + // support other volume types, such as host volumes + err = client.CSIVolumes().Deregister(volID, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error deregistering volume: %s", err)) + return 1 + } + + return 0 +} diff --git a/command/volume_register.go b/command/volume_register.go new file mode 100644 index 000000000..83d0307ba --- /dev/null +++ b/command/volume_register.go @@ -0,0 +1,130 @@ +package command + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/posener/complete" +) + +type VolumeRegisterCommand struct { + Meta +} + +func (c *VolumeRegisterCommand) Help() string { + helpText := ` +Usage: nomad volume register [options] <input> + + Creates or updates a volume in Nomad. The volume must exist on the remote + storage provider before it can be used by a task. + + If the supplied path is "-" the volume file is read from stdin. Otherwise, it + is read from the file at the supplied path.
+ +General Options: + + ` + generalOptionsUsage() + + return strings.TrimSpace(helpText) +} + +func (c *VolumeRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Meta.AutocompleteFlags(FlagSetClient) +} + +func (c *VolumeRegisterCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFiles("*") +} + +func (c *VolumeRegisterCommand) Synopsis() string { + return "Create or update a volume" +} + +func (c *VolumeRegisterCommand) Name() string { return "volume register" } + +func (c *VolumeRegisterCommand) Run(args []string) int { + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + // Check that we get exactly one argument + args = flags.Args() + if l := len(args); l != 1 { + c.Ui.Error("This command takes one argument: <input>") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Read the file contents + file := args[0] + var rawVolume []byte + var err error + if file == "-" { + rawVolume, err = ioutil.ReadAll(os.Stdin) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to read stdin: %v", err)) + return 1 + } + } else { + rawVolume, err = ioutil.ReadFile(file) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to read file: %v", err)) + return 1 + } + } + + ast, volType, err := parseVolumeType(string(rawVolume)) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing the volume type: %s", err)) + return 1 + } + volType = strings.ToLower(volType) + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + switch volType { + case "csi": + code := c.csiRegister(client, ast) + if code != 0 { + return code + } + default: + c.Ui.Error(fmt.Sprintf("Unknown volume type: %s", volType)) + return 1 + } + + return 0 +} + +// parseVolumeType parses the volume type from the HCL input so we can dispatch on it +func parseVolumeType(input string) (*ast.File, string, error) { + // Parse the AST first + ast, err := hcl.Parse(input) + if err != nil { + return nil, "", fmt.Errorf("parse error: %v", err) + } + + // Decode the type, so we can dispatch on it + dispatch := &struct { + T string `hcl:"type"` + }{} + err = hcl.DecodeObject(dispatch, ast) + if err != nil { + return nil, "", fmt.Errorf("dispatch error: %v", err) + } + + return ast, dispatch.T, nil +} diff --git a/command/volume_register_csi.go b/command/volume_register_csi.go new file mode 100644 index 000000000..a7f74f2f3 --- /dev/null +++ b/command/volume_register_csi.go @@ -0,0 +1,44 @@ +package command + +import ( + "fmt" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" +) + +func (c *VolumeRegisterCommand) csiRegister(client *api.Client, ast *ast.File) int { + vol, err := csiDecodeVolume(ast) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error decoding the volume definition: %s", err)) + return 1 + } + _, err = client.CSIVolumes().Register(vol, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error registering volume: %s", err)) + return 1 + } + + return 0 +} + +// csiDecodeVolume decodes the CSI volume definition from the parsed HCL +func csiDecodeVolume(input *ast.File) (*api.CSIVolume, error) { + output := &api.CSIVolume{} + err := hcl.DecodeObject(output, input) + if err != nil { + return nil, err + } + + // api.CSIVolume doesn't have the type field, it's used only for dispatch in
diff --git a/command/volume_register_csi.go b/command/volume_register_csi.go new file mode 100644 index 000000000..a7f74f2f3 --- /dev/null +++ b/command/volume_register_csi.go @@ -0,0 +1,44 @@ +package command + +import ( + "fmt" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" +) + +func (c *VolumeRegisterCommand) csiRegister(client *api.Client, ast *ast.File) int { + vol, err := csiDecodeVolume(ast) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error decoding the volume definition: %s", err)) + return 1 + } + _, err = client.CSIVolumes().Register(vol, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error registering volume: %s", err)) + return 1 + } + + return 0 +} + +// csiDecodeVolume decodes a CSI volume definition from the parsed HCL AST +func csiDecodeVolume(input *ast.File) (*api.CSIVolume, error) { + output := &api.CSIVolume{} + err := hcl.DecodeObject(output, input) + if err != nil { + return nil, err + } + + // api.CSIVolume doesn't have the type field, it's used only for dispatch in + // parseVolumeType + helper.RemoveEqualFold(&output.ExtraKeysHCL, "type") + err = helper.UnusedKeys(output) + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/command/volume_register_test.go b/command/volume_register_test.go new file mode 100644 index 000000000..d707f6171 --- /dev/null +++ b/command/volume_register_test.go @@ -0,0 +1,97 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/nomad/api" + "github.com/stretchr/testify/require" +) + +func TestVolumeDispatchParse(t *testing.T) { + t.Parallel() + + cases := []struct { + hcl string + t string + err string + }{{ + hcl: ` +type = "foo" +rando = "bar" +`, + t: "foo", + err: "", + }, { + hcl: `{"id": "foo", "type": "foo", "other": "bar"}`, + t: "foo", + err: "", + }} + + for _, c := range cases { + t.Run(c.hcl, func(t *testing.T) { + _, s, err := parseVolumeType(c.hcl) + require.Equal(t, c.t, s) + if c.err == "" { + require.NoError(t, err) + } else { + require.Contains(t, err.Error(), c.err) + } + }) + } +} + +func TestCSIVolumeParse(t *testing.T) { + t.Parallel() + + cases := []struct { + hcl string + q *api.CSIVolume + err string + }{{ + hcl: ` +id = "foo" +type = "csi" +namespace = "n" +access_mode = "single-node-writer" +attachment_mode = "file-system" +plugin_id = "p" +`, + q: &api.CSIVolume{ + ID: "foo", + Namespace: "n", + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "p", + }, + err: "", + }, { + hcl: ` +{"id": "foo", "namespace": "n", "type": "csi", "access_mode": "single-node-writer", "attachment_mode": "file-system", +"plugin_id": "p"} +`, + q: &api.CSIVolume{ + ID: "foo", + Namespace: "n", + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "p", + }, + err: "", + }} + + for _, c := range cases { + t.Run(c.hcl, func(t *testing.T) { + ast, err := hcl.ParseString(c.hcl) + require.NoError(t, err) + vol, err := csiDecodeVolume(ast) + require.Equal(t, c.q, vol) + if c.err == "" { + require.NoError(t, err) + } else { + require.Contains(t, err.Error(), c.err) + } + }) + } +}
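The command is a thin wrapper over the API client, so the same registration can be driven from Go directly. A minimal sketch reusing the field values from the fixtures above; it assumes a reachable agent configured through the usual environment variables:

package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig picks up NOMAD_ADDR and friends from the environment
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	vol := &api.CSIVolume{
		ID:             "foo",
		Namespace:      "n",
		AccessMode:     "single-node-writer",
		AttachmentMode: "file-system",
		PluginID:       "p",
	}

	// the same call csiRegister makes after decoding the HCL spec
	if _, err := client.CSIVolumes().Register(vol, nil); err != nil {
		log.Fatal(err)
	}
}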
diff --git a/command/volume_status.go b/command/volume_status.go new file mode 100644 index 000000000..a13be974f --- /dev/null +++ b/command/volume_status.go @@ -0,0 +1,134 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type VolumeStatusCommand struct { + Meta + length int + short bool + verbose bool + json bool + template string +} + +func (c *VolumeStatusCommand) Help() string { + helpText := ` +Usage: nomad volume status [options] <id> + + Display status information about a CSI volume. If no volume id is given, a + list of all volumes will be displayed. + +General Options: + + ` + generalOptionsUsage() + ` + +Status Options: + + -type <type> + List only volumes of type <type>. + + -short + Display short output. Used only when a single volume is being + queried, and drops verbose information about allocations. + + -verbose + Display full allocation information. + + -json + Output the volume in its JSON format. + + -t + Format and display the volume using a Go template. +` + return strings.TrimSpace(helpText) +} + +func (c *VolumeStatusCommand) Synopsis() string { + return "Display status information about a volume" +} + +func (c *VolumeStatusCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-type": predictVolumeType, + "-short": complete.PredictNothing, + "-verbose": complete.PredictNothing, + "-json": complete.PredictNothing, + "-t": complete.PredictAnything, + }) +} + +func (c *VolumeStatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Volumes, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Volumes] + }) +} + +func (c *VolumeStatusCommand) Name() string { return "volume status" } + +func (c *VolumeStatusCommand) Run(args []string) int { + var typeArg string + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&typeArg, "type", "", "") + flags.BoolVar(&c.short, "short", false, "") + flags.BoolVar(&c.verbose, "verbose", false, "") + flags.BoolVar(&c.json, "json", false, "") + flags.StringVar(&c.template, "t", "", "") + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments: %s", err)) + return 1 + } + + // Check that we either got no arguments or exactly one + args = flags.Args() + if len(args) > 1 { + c.Ui.Error("This command takes either no arguments or one: <id>") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Truncate the id unless full length is requested + c.length = shortId + if c.verbose { + c.length = fullId + } + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + id := "" + if len(args) == 1 { + id = args[0] + } + + code := c.csiStatus(client, id) + if code != 0 { + return code + } + + // Extend this section with other volume implementations + + return 0 +} diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go new file mode 100644 index 000000000..95f6883bf --- /dev/null +++ b/command/volume_status_csi.go @@ -0,0 +1,191 @@ +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/nomad/structs" +) + +func (c *VolumeStatusCommand) csiBanner() { + if !(c.json || len(c.template) > 0) { + c.Ui.Output(c.Colorize().Color("[bold]Container Storage Interface[reset]")) + } +} + +func (c *VolumeStatusCommand) csiStatus(client *api.Client, id string) int { + // Invoke list mode if no volume id + if id == "" { + c.csiBanner() + vols, _, err := client.CSIVolumes().List(nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying volumes: %s", err)) + return 1 + } + + if len(vols) == 0 { + // tell the user there are no volumes, rather than printing an empty table + c.Ui.Error("No CSI volumes") + } else { + str, err := c.csiFormatVolumes(vols) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) + return 1 + } + c.Ui.Output(str) + } + return 0 + } + + // Try querying the volume + vol, _, err := client.CSIVolumes().Info(id, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying volume: %s", err)) + return 1 + } + + str, err := c.formatBasic(vol) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting volume: %s", err)) + return 1 + } + c.Ui.Output(str)
+ + return 0 +} + +func (c *VolumeStatusCommand) csiFormatVolumes(vols []*api.CSIVolumeListStub) (string, error) { + // Sort the output by volume id + sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID }) + + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, vols) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + rows := make([]string, len(vols)+1) + rows[0] = "ID|Name|Plugin ID|Schedulable|Access Mode" + for i, v := range vols { + rows[i+1] = fmt.Sprintf("%s|%s|%s|%t|%s", + limit(v.ID, c.length), + v.Name, + v.PluginID, + v.Schedulable, + v.AccessMode, + ) + } + return formatList(rows), nil +} + +func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, vol) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + output := []string{ + fmt.Sprintf("ID|%s", vol.ID), + fmt.Sprintf("Name|%s", vol.Name), + fmt.Sprintf("External ID|%s", vol.ExternalID), + fmt.Sprintf("Plugin ID|%s", vol.PluginID), + fmt.Sprintf("Provider|%s", vol.Provider), + fmt.Sprintf("Version|%s", vol.ProviderVersion), + fmt.Sprintf("Schedulable|%t", vol.Schedulable), + fmt.Sprintf("Controllers Healthy|%d", vol.ControllersHealthy), + fmt.Sprintf("Controllers Expected|%d", vol.ControllersExpected), + fmt.Sprintf("Nodes Healthy|%d", vol.NodesHealthy), + fmt.Sprintf("Nodes Expected|%d", vol.NodesExpected), + + fmt.Sprintf("Access Mode|%s", vol.AccessMode), + fmt.Sprintf("Attachment Mode|%s", vol.AttachmentMode), + fmt.Sprintf("Mount Options|%s", csiVolMountOption(vol.MountOptions, nil)), + fmt.Sprintf("Namespace|%s", vol.Namespace), + } + + // Exit early + if c.short { + return formatKV(output), nil + } + + // Format the allocs + banner := c.Colorize().Color("\n[bold]Allocations[reset]") + allocs := formatAllocListStubs(vol.Allocations, c.verbose, c.length) + full := []string{formatKV(output), banner, allocs} + return strings.Join(full, "\n"), nil +} + +func (c *VolumeStatusCommand) formatTopologies(vol *api.CSIVolume) string { + var out []string + + // Find the union of all the keys + head := map[string]string{} + for _, t := range vol.Topologies { + for key := range t.Segments { + if _, ok := head[key]; !ok { + head[key] = "" + } + } + } + + // Append the header + var line []string + for key := range head { + line = append(line, key) + } + out = append(out, strings.Join(line, " ")) + + // Append each topology + for _, t := range vol.Topologies { + line = []string{} + for key := range head { + line = append(line, t.Segments[key]) + } + out = append(out, strings.Join(line, " ")) + } + + return strings.Join(out, "\n") +} + +func csiVolMountOption(volume, request *api.CSIMountOptions) string { + var req, opts *structs.CSIMountOptions + + if request != nil { + req = &structs.CSIMountOptions{ + FSType: request.FSType, + MountFlags: request.MountFlags, + } + } + + if volume == nil { + opts = req + } else { + opts = &structs.CSIMountOptions{ + FSType: volume.FSType, + MountFlags: volume.MountFlags, + } + opts.Merge(req) + } + + if opts == nil { + return "" + } + + var out string + if opts.FSType != "" { + out = fmt.Sprintf("fs_type: %s", opts.FSType) + } + + if len(opts.MountFlags) > 0 { + out = fmt.Sprintf("%s flags: %s", out, strings.Join(opts.MountFlags, ", ")) + } + + return out +} diff --git a/command/volume_status_test.go b/command/volume_status_test.go new file mode 100644 index 
000000000..a3c6a5b20 --- /dev/null +++ b/command/volume_status_test.go @@ -0,0 +1,58 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/nomad/helper/uuid" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/cli" + "github.com/posener/complete" + "github.com/stretchr/testify/require" +) + +func TestCSIVolumeStatusCommand_Implements(t *testing.T) { + t.Parallel() + var _ cli.Command = &VolumeStatusCommand{} +} + +func TestCSIVolumeStatusCommand_Fails(t *testing.T) { + t.Parallel() + ui := new(cli.MockUi) + cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + code := cmd.Run([]string{"some", "bad", "args"}) + require.Equal(t, 1, code) + + out := ui.ErrorWriter.String() + require.Contains(t, out, commandErrorText(cmd)) + ui.ErrorWriter.Reset() +} + +func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { + t.Parallel() + + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + + ui := new(cli.MockUi) + cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}} + + state := srv.Agent.Server().State() + + vol := &structs.CSIVolume{ + ID: uuid.Generate(), + Namespace: "default", + PluginID: "glade", + } + + require.NoError(t, state.CSIVolumeRegister(1000, []*structs.CSIVolume{vol})) + + prefix := vol.ID[:len(vol.ID)-5] + args := complete.Args{Last: prefix} + predictor := cmd.AutocompleteArgs() + + res := predictor.Predict(args) + require.Equal(t, 1, len(res)) + require.Equal(t, vol.ID, res[0]) +} diff --git a/contributing/checklist-command.md b/contributing/checklist-command.md index 399767616..6d99b24d1 100644 --- a/contributing/checklist-command.md +++ b/contributing/checklist-command.md @@ -19,6 +19,7 @@ CLI (command/) -> API Client (api/) -> HTTP API (command/agent) -> RPC (nomad/) * [ ] Implement `-verbose` (expands truncated UUIDs, adds other detail) * [ ] Update help text * [ ] Implement and test new HTTP endpoint in `command/agent/_endpoint.go` +* [ ] Register new URL paths in `command/agent/http.go` * [ ] Implement and test new RPC endpoint in `nomad/_endpoint.go` * [ ] Implement and test new Client RPC endpoint in `client/_endpoint.go` (For client endpoints like Filesystem only) diff --git a/contributing/checklist-rpc-endpoint.md b/contributing/checklist-rpc-endpoint.md index d6f62299d..8726cbfad 100644 --- a/contributing/checklist-rpc-endpoint.md +++ b/contributing/checklist-rpc-endpoint.md @@ -7,19 +7,27 @@ Prefer adding a new message to changing any existing RPC messages. * [ ] `Request` struct and `*RequestType` constant in `nomad/structs/structs.go`. 
Append the constant, old constant values must remain unchanged -* [ ] In `nomad/fsm.go`, add a dispatch case to the switch statement in `Apply` + +* [ ] In `nomad/fsm.go`, add a dispatch case to the switch statement in `(n *nomadFSM) Apply` + * `*nomadFSM` method to decode the request and call the state method + +* [ ] State method for modifying objects in a `Txn` in `nomad/state/state_store.go` + * `nomad/state/state_store_test.go` + +* [ ] Handler for the request in `nomad/foo_endpoint.go` + * RPCs are resolved by matching the method name for bound structs [net/rpc](https://golang.org/pkg/net/rpc/) + * Check ACLs for security, list endpoints filter by ACL + * Register new RPC struct in `nomad/server.go` + * Check ACLs to enforce security + * Wrapper for the HTTP request in `command/agent/foo_endpoint.go` + * Backwards compatibility requires a new endpoint: an upgraded client or server may be forwarding this request to an old server without support for the new RPC + * RPCs triggered by an internal process may not need support + * Check ACLs as an optimization + +* [ ] `nomad/core_sched.go` sends many RPCs + * `ServersMeetMinimumVersion` asserts that the server cluster is upgraded, so use this to guard sending the new RPC, else send the old RPC diff --git a/e2e/.gitignore b/e2e/.gitignore index cfc151d21..adad33a7b 100644 --- a/e2e/.gitignore +++ b/e2e/.gitignore @@ -1 +1,2 @@ provisioning.json +csi/input/volumes.json diff --git a/e2e/csi/csi.go b/e2e/csi/csi.go new file mode 100644 index 000000000..4029e103d --- /dev/null +++ b/e2e/csi/csi.go @@ -0,0 +1,251 @@ +package csi + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "os" + "time" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/e2e/e2eutil" + "github.com/hashicorp/nomad/e2e/framework" + "github.com/hashicorp/nomad/helper/uuid" + "github.com/stretchr/testify/require" +) + +type CSIVolumesTest struct { + framework.TC + jobIds []string + volumeIDs *volumeConfig +} + +func init() { + framework.AddSuites(&framework.TestSuite{ + Component: "CSI", + CanRunLocal: true, + Consul: false, + Cases: []framework.TestCase{ + new(CSIVolumesTest), + }, + }) +} + +type volumeConfig struct { + EBSVolumeID string `json:"ebs_volume"` + EFSVolumeID string `json:"efs_volume"` +} + +func (tc *CSIVolumesTest) BeforeAll(f *framework.F) { + t := f.T() + // The volume IDs come from the external provider, so we need + // to read the configuration out of our Terraform output. + rawjson, err := ioutil.ReadFile("csi/input/volumes.json") + if err != nil { + t.Skip("volume ID configuration not found, try running 'terraform output volumes > ../csi/input/volumes.json'") + } + volumeIDs := &volumeConfig{} + err = json.Unmarshal(rawjson, volumeIDs) + if err != nil { + t.Fatal("volume ID configuration could not be read") + } + + tc.volumeIDs = volumeIDs + + // Ensure the cluster has a leader and at least two client + // nodes in a ready state before running tests + e2eutil.WaitForLeader(t, tc.Nomad()) + e2eutil.WaitForNodesReady(t, tc.Nomad(), 2) +} + +// TestEBSVolumeClaim launches AWS EBS plugins and registers an EBS volume +// as a Nomad CSI volume. We then deploy a job that writes to the volume, +// stop that job, and reuse the volume for another job which should be able +// to read the data written by the first job.
+func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) { + t := f.T() + require := require.New(t) + nomadClient := tc.Nomad() + uuid := uuid.Generate() + + // deploy the controller plugin job + controllerJobID := "aws-ebs-plugin-controller-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, controllerJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-ebs-controller.nomad", controllerJobID, "") + + // deploy the node plugins job + nodesJobID := "aws-ebs-plugin-nodes-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, nodesJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-ebs-nodes.nomad", nodesJobID, "") + + // wait for plugin to become healthy + require.Eventually(func() bool { + plugin, _, err := nomadClient.CSIPlugins().Info("aws-ebs0", nil) + if err != nil { + return false + } + if plugin.ControllersHealthy != 1 || plugin.NodesHealthy < 2 { + return false + } + return true + // TODO(tgross): cut down this time after fixing + // https://github.com/hashicorp/nomad/issues/7296 + }, 90*time.Second, 5*time.Second) + + // register a volume + volID := "ebs-vol0" + vol := &api.CSIVolume{ + ID: volID, + Name: volID, + ExternalID: tc.volumeIDs.EBSVolumeID, + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "aws-ebs0", + } + _, err := nomadClient.CSIVolumes().Register(vol, nil) + require.NoError(err) + defer nomadClient.CSIVolumes().Deregister(volID, nil) + + // deploy a job that writes to the volume + writeJobID := "write-ebs-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, writeJobID) + writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-ebs-volume.nomad", writeJobID, "") + writeAllocID := writeAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID) + + // read data from volume and assert the writer wrote a file to it + writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil) + require.NoError(err) + expectedPath := "/local/test/" + writeAllocID + _, err = readFile(nomadClient, writeAlloc, expectedPath) + require.NoError(err) + + // Shutdown the writer so we can run a reader. + // we could mount the EBS volume with multi-attach, but we + // want this test to exercise the unpublish workflow. + nomadClient.Jobs().Deregister(writeJobID, true, nil) + + // deploy a job so we can read from the volume + readJobID := "read-ebs-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, readJobID) + readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-ebs-volume.nomad", readJobID, "") + readAllocID := readAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, readAllocID) + + // ensure we clean up claim before we deregister volumes + defer nomadClient.Jobs().Deregister(readJobID, true, nil) + + // read data from volume and assert the writer wrote a file to it + readAlloc, _, err := nomadClient.Allocations().Info(readAllocID, nil) + require.NoError(err) + _, err = readFile(nomadClient, readAlloc, expectedPath) + require.NoError(err) +} + +// TestEFSVolumeClaim launches AWS EFS plugins and registers an EFS volume +// as a Nomad CSI volume. We then deploy a job that writes to the volume, +// and share the volume with another job which should be able to read the +// data written by the first job. 
+func (tc *CSIVolumesTest) TestEFSVolumeClaim(f *framework.F) { + t := f.T() + require := require.New(t) + nomadClient := tc.Nomad() + uuid := uuid.Generate() + + // deploy the node plugins job (no need for a controller for EFS) + nodesJobID := "aws-efs-plugin-nodes-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, nodesJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-efs-nodes.nomad", nodesJobID, "") + + // wait for plugin to become healthy + require.Eventually(func() bool { + plugin, _, err := nomadClient.CSIPlugins().Info("aws-efs0", nil) + if err != nil { + return false + } + if plugin.NodesHealthy < 2 { + return false + } + return true + // TODO(tgross): cut down this time after fixing + // https://github.com/hashicorp/nomad/issues/7296 + }, 90*time.Second, 5*time.Second) + + // register a volume + volID := "efs-vol0" + vol := &api.CSIVolume{ + ID: volID, + Name: volID, + ExternalID: tc.volumeIDs.EFSVolumeID, + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "aws-efs0", + } + _, err := nomadClient.CSIVolumes().Register(vol, nil) + require.NoError(err) + defer nomadClient.CSIVolumes().Deregister(volID, nil) + + // deploy a job that writes to the volume + writeJobID := "write-efs-" + uuid[0:8] + writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-efs-volume-write.nomad", writeJobID, "") + writeAllocID := writeAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID) + + // read data from volume and assert the writer wrote a file to it + writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil) + require.NoError(err) + expectedPath := "/local/test/" + writeAllocID + _, err = readFile(nomadClient, writeAlloc, expectedPath) + require.NoError(err) + + // Shutdown the writer so we can run a reader. + // although EFS should support multiple readers, the plugin + // does not. + nomadClient.Jobs().Deregister(writeJobID, true, nil) + + // deploy a job that reads from the volume. 
+ readJobID := "read-efs-" + uuid[0:8] + readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-efs-volume-read.nomad", readJobID, "") + defer nomadClient.Jobs().Deregister(readJobID, true, nil) + e2eutil.WaitForAllocRunning(t, nomadClient, readAllocs[0].ID) + + // read data from volume and assert the writer wrote a file to it + readAlloc, _, err := nomadClient.Allocations().Info(readAllocs[0].ID, nil) + require.NoError(err) + _, err = readFile(nomadClient, readAlloc, expectedPath) + require.NoError(err) +} + +func (tc *CSIVolumesTest) AfterEach(f *framework.F) { + nomadClient := tc.Nomad() + jobs := nomadClient.Jobs() + // Stop all jobs in test + for _, id := range tc.jobIds { + jobs.Deregister(id, true, nil) + } + // Garbage collect + nomadClient.System().GarbageCollect() +} + +// TODO(tgross): replace this w/ AllocFS().Stat() after +// https://github.com/hashicorp/nomad/issues/7365 is fixed +func readFile(client *api.Client, alloc *api.Allocation, path string) (bytes.Buffer, error) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + var stdout, stderr bytes.Buffer + _, err := client.Allocations().Exec(ctx, + alloc, "task", false, + []string{"cat", path}, + os.Stdin, &stdout, &stderr, + make(chan api.TerminalSize), nil) + return stdout, err +} diff --git a/e2e/csi/input/plugin-aws-ebs-controller.nomad b/e2e/csi/input/plugin-aws-ebs-controller.nomad new file mode 100644 index 000000000..e5caa730c --- /dev/null +++ b/e2e/csi/input/plugin-aws-ebs-controller.nomad @@ -0,0 +1,40 @@ +# jobspec for running CSI plugin for AWS EBS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-ebs-controller" { + datacenters = ["dc1"] + + group "controller" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-ebs-csi-driver:latest" + + args = [ + "controller", + "--endpoint=unix://csi/csi.sock", + "--logtostderr", + "--v=5", + ] + + # note: plugins running as controllers don't + # need to run as privileged tasks + } + + csi_plugin { + id = "aws-ebs0" + type = "controller" + mount_dir = "/csi" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/plugin-aws-ebs-nodes.nomad b/e2e/csi/input/plugin-aws-ebs-nodes.nomad new file mode 100644 index 000000000..303b2a8e8 --- /dev/null +++ b/e2e/csi/input/plugin-aws-ebs-nodes.nomad @@ -0,0 +1,43 @@ +# jobspec for running CSI plugin for AWS EBS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-ebs-nodes" { + datacenters = ["dc1"] + + # you can run node plugins as service jobs as well, but this ensures + # that all nodes in the DC have a copy. 
+ type = "system" + + group "nodes" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-ebs-csi-driver:latest" + + args = [ + "node", + "--endpoint=unix://csi/csi.sock", + "--logtostderr", + "--v=5", + ] + + privileged = true + } + + csi_plugin { + id = "aws-ebs0" + type = "node" + mount_dir = "/csi" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/plugin-aws-efs-nodes.nomad b/e2e/csi/input/plugin-aws-efs-nodes.nomad new file mode 100644 index 000000000..8e1429e7a --- /dev/null +++ b/e2e/csi/input/plugin-aws-efs-nodes.nomad @@ -0,0 +1,45 @@ +# jobspec for running CSI plugin for AWS EFS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-efs-nodes" { + datacenters = ["dc1"] + + # you can run node plugins as service jobs as well, but this ensures + # that all nodes in the DC have a copy. + type = "system" + + group "nodes" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-efs-csi-driver:latest" + + # note: the EFS driver doesn't seem to respect the --endpoint + # flag and always sets up the listener at '/tmp/csi.sock' + args = [ + "node", + "--endpoint=unix://tmp/csi.sock", + "--logtostderr", + "--v=5", + ] + + privileged = true + } + + csi_plugin { + id = "aws-efs0" + type = "node" + mount_dir = "/tmp" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/use-ebs-volume.nomad b/e2e/csi/input/use-ebs-volume.nomad new file mode 100644 index 000000000..866a6a4dc --- /dev/null +++ b/e2e/csi/input/use-ebs-volume.nomad @@ -0,0 +1,32 @@ +# a job that mounts an EBS volume and writes its job ID as a file +job "use-ebs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + type = "csi" + source = "ebs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = false + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/csi/input/use-efs-volume-read.nomad b/e2e/csi/input/use-efs-volume-read.nomad new file mode 100644 index 000000000..12b5f56b2 --- /dev/null +++ b/e2e/csi/input/use-efs-volume-read.nomad @@ -0,0 +1,33 @@ +# a job that mounts the EFS volume and sleeps, so that we can +# read its mounted file system remotely +job "use-efs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + type = "csi" + source = "efs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = true + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/csi/input/use-efs-volume-write.nomad b/e2e/csi/input/use-efs-volume-write.nomad new file mode 100644 index 000000000..912fa734f --- /dev/null +++ b/e2e/csi/input/use-efs-volume-write.nomad @@ -0,0 +1,32 @@ +# a job that mounts an EFS volume and writes its job ID as a file +job "use-efs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + 
type = "csi" + source = "efs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = false + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 8e3d0bf75..8b63e1b5b 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -13,6 +13,7 @@ import ( _ "github.com/hashicorp/nomad/e2e/connect" _ "github.com/hashicorp/nomad/e2e/consul" _ "github.com/hashicorp/nomad/e2e/consultemplate" + _ "github.com/hashicorp/nomad/e2e/csi" _ "github.com/hashicorp/nomad/e2e/deployment" _ "github.com/hashicorp/nomad/e2e/example" _ "github.com/hashicorp/nomad/e2e/hostvolumes" diff --git a/e2e/terraform/iam.tf b/e2e/terraform/iam.tf index 484d0c1ce..8cf30ed3c 100644 --- a/e2e/terraform/iam.tf +++ b/e2e/terraform/iam.tf @@ -48,6 +48,7 @@ data "aws_iam_policy_document" "auto_discover_cluster" { "ec2:DescribeTags", "ec2:DescribeVolume*", "ec2:AttachVolume", + "ec2:DetachVolume", "autoscaling:DescribeAutoScalingGroups", ] resources = ["*"] diff --git a/e2e/terraform/provisioning.tf b/e2e/terraform/provisioning.tf index 5e68d22ca..5d69b13b4 100644 --- a/e2e/terraform/provisioning.tf +++ b/e2e/terraform/provisioning.tf @@ -9,6 +9,15 @@ export NOMAD_E2E=1 EOM } +output "volumes" { + description = "get volume IDs needed to register volumes for CSI testing." + value = jsonencode( + { + "ebs_volume" : aws_ebs_volume.csi.id, + "efs_volume" : aws_efs_file_system.csi.id, + }) +} + output "provisioning" { description = "output to a file to be use w/ E2E framework -provision.terraform" value = jsonencode( diff --git a/helper/funcs.go b/helper/funcs.go index 7a6b4c151..c75294a1b 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -3,7 +3,9 @@ package helper import ( "crypto/sha512" "fmt" + "reflect" "regexp" + "strings" "time" multierror "github.com/hashicorp/go-multierror" @@ -387,3 +389,75 @@ func CheckHCLKeys(node ast.Node, valid []string) error { return result } + +// UnusedKeys returns a pretty-printed error if any `hcl:",unusedKeys"` is not empty +func UnusedKeys(obj interface{}) error { + val := reflect.ValueOf(obj) + if val.Kind() == reflect.Ptr { + val = reflect.Indirect(val) + } + return unusedKeysImpl([]string{}, val) +} + +func unusedKeysImpl(path []string, val reflect.Value) error { + stype := val.Type() + for i := 0; i < stype.NumField(); i++ { + ftype := stype.Field(i) + fval := val.Field(i) + tags := strings.Split(ftype.Tag.Get("hcl"), ",") + name := tags[0] + tags = tags[1:] + + if fval.Kind() == reflect.Ptr { + fval = reflect.Indirect(fval) + } + + // struct? recurse. Add the struct's key to the path + if fval.Kind() == reflect.Struct { + err := unusedKeysImpl(append([]string{name}, path...), fval) + if err != nil { + return err + } + continue + } + + // Search the hcl tags for "unusedKeys" + unusedKeys := false + for _, p := range tags { + if p == "unusedKeys" { + unusedKeys = true + break + } + } + + if unusedKeys { + ks, ok := fval.Interface().([]string) + if ok && len(ks) != 0 { + ps := "" + if len(path) > 0 { + ps = strings.Join(path, ".") + " " + } + return fmt.Errorf("%sunexpected keys %s", + ps, + strings.Join(ks, ", ")) + } + } + } + return nil +} + +// RemoveEqualFold removes the first string that EqualFold matches. 
+ +// RemoveEqualFold removes the first string that EqualFold matches. It +// updates xs in place. +func RemoveEqualFold(xs *[]string, search string) { + sl := *xs + for i, x := range sl { + if strings.EqualFold(x, search) { + sl = append(sl[:i], sl[i+1:]...) + if len(sl) == 0 { + *xs = nil + } else { + *xs = sl + } + return + } + } +} diff --git a/helper/grpc-middleware/logging/client_interceptors.go b/helper/grpc-middleware/logging/client_interceptors.go new file mode 100644 index 000000000..6d19036fe --- /dev/null +++ b/helper/grpc-middleware/logging/client_interceptors.go @@ -0,0 +1,42 @@ +package logging + +import ( + "context" + "path" + "time" + + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +// UnaryClientInterceptor returns a new unary client interceptor that logs the execution of gRPC calls. +func UnaryClientInterceptor(logger hclog.Logger, opts ...Option) grpc.UnaryClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + emitClientLog(logger, o, method, startTime, err, "finished client unary call") + return err + } +} + +// StreamClientInterceptor returns a new streaming client interceptor that logs the execution of gRPC calls. +func StreamClientInterceptor(logger hclog.Logger, opts ...Option) grpc.StreamClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + startTime := time.Now() + clientStream, err := streamer(ctx, desc, cc, method, opts...) + emitClientLog(logger, o, method, startTime, err, "finished client streaming call") + return clientStream, err + } +} + +func emitClientLog(logger hclog.Logger, o *options, fullMethodString string, startTime time.Time, err error, msg string) { + code := status.Code(err) + logLevel := o.levelFunc(code) + reqDuration := time.Since(startTime) + service := path.Dir(fullMethodString)[1:] + method := path.Base(fullMethodString) + logger.Log(logLevel, msg, "grpc.code", code, "duration", reqDuration, "grpc.service", service, "grpc.method", method) +} diff --git a/helper/grpc-middleware/logging/options.go b/helper/grpc-middleware/logging/options.go new file mode 100644 index 000000000..03e63b5d3 --- /dev/null +++ b/helper/grpc-middleware/logging/options.go @@ -0,0 +1,89 @@ +package logging + +import ( + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc/codes" +) + +type options struct { + levelFunc CodeToLevel +} + +var defaultOptions = &options{} + +type Option func(*options) + +func evaluateClientOpt(opts []Option) *options { + optCopy := &options{} + *optCopy = *defaultOptions + optCopy.levelFunc = DefaultCodeToLevel + for _, o := range opts { + o(optCopy) + } + return optCopy +} + +func WithStatusCodeToLevelFunc(fn CodeToLevel) Option { + return func(opts *options) { + opts.levelFunc = fn + } +} + +// CodeToLevel defines the mapping between gRPC return codes and hclog levels.
+type CodeToLevel func(code codes.Code) hclog.Level + +func DefaultCodeToLevel(code codes.Code) hclog.Level { + switch code { + // Trace Logs -- Useful for Nomad developers but not necessarily always wanted + case codes.OK: + return hclog.Trace + + // Debug logs + case codes.Canceled: + return hclog.Debug + case codes.InvalidArgument: + return hclog.Debug + case codes.ResourceExhausted: + return hclog.Debug + case codes.FailedPrecondition: + return hclog.Debug + case codes.Aborted: + return hclog.Debug + case codes.OutOfRange: + return hclog.Debug + case codes.NotFound: + return hclog.Debug + case codes.AlreadyExists: + return hclog.Debug + + // Info Logs - More curious/interesting than debug, but not necessarily critical + case codes.Unknown: + return hclog.Info + case codes.DeadlineExceeded: + return hclog.Info + case codes.PermissionDenied: + return hclog.Info + case codes.Unauthenticated: + // unauthenticated requests are probably usually fine? + return hclog.Info + case codes.Unavailable: + // unavailable errors indicate the upstream is not currently available. Info + // because I would guess these are usually transient and will be handled by + // retry mechanisms before being served as a higher level warning. + return hclog.Info + + // Warn Logs - These are almost definitely bad in most cases - usually because + // the upstream is broken. + case codes.Unimplemented: + return hclog.Warn + case codes.Internal: + return hclog.Warn + case codes.DataLoss: + return hclog.Warn + + default: + // Codes that aren't implemented as part of a CodeToLevel case are probably + // unknown and should be surfaced. + return hclog.Info + } +} diff --git a/helper/mount/mount.go b/helper/mount/mount.go new file mode 100644 index 000000000..cd3e5f362 --- /dev/null +++ b/helper/mount/mount.go @@ -0,0 +1,16 @@ +package mount + +// Mounter defines the set of methods to allow for mount operations on a system. +type Mounter interface { + // IsNotAMountPoint detects if a provided directory is not a mountpoint. + IsNotAMountPoint(file string) (bool, error) + + // Mount will mount filesystem according to the specified configuration, on + // the condition that the target path is *not* already mounted. Options must + // be specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". + Mount(device, target, mountType, options string) error +} + +// Compile-time check to ensure all Mounter implementations satisfy +// the mount interface. +var _ Mounter = &mounter{} diff --git a/helper/mount/mount_linux.go b/helper/mount/mount_linux.go new file mode 100644 index 000000000..4e4f2ab69 --- /dev/null +++ b/helper/mount/mount_linux.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import ( + docker_mount "github.com/docker/docker/pkg/mount" +) + +// mounter provides the default implementation of mount.Mounter +// for the linux platform. +// Currently it delegates to the docker `mount` package. +type mounter struct { +} + +// New returns a Mounter for the current system. +func New() Mounter { + return &mounter{} +} + +// IsNotAMountPoint determines if a directory is not a mountpoint. 
+// It does this by checking the path against the contents of /proc/self/mountinfo +func (m *mounter) IsNotAMountPoint(path string) (bool, error) { + isMount, err := docker_mount.Mounted(path) + return !isMount, err +} + +func (m *mounter) Mount(device, target, mountType, options string) error { + // Defer to the docker implementation of `Mount`, it's correct enough for our + // use case and avoids us needing to shell out to the `mount` utility. + return docker_mount.Mount(device, target, mountType, options) +} diff --git a/helper/mount/mount_unsupported.go b/helper/mount/mount_unsupported.go new file mode 100644 index 000000000..8605aded4 --- /dev/null +++ b/helper/mount/mount_unsupported.go @@ -0,0 +1,25 @@ +// +build !linux + +package mount + +import ( + "errors" +) + +// mounter provides the default implementation of mount.Mounter +// for unsupported platforms. +type mounter struct { +} + +// New returns a Mounter for the current system. +func New() Mounter { + return &mounter{} +} + +func (m *mounter) IsNotAMountPoint(path string) (bool, error) { + return false, errors.New("Unsupported platform") +} + +func (m *mounter) Mount(device, target, mountType, options string) error { + return errors.New("Unsupported platform") +} diff --git a/jobspec/parse_group.go b/jobspec/parse_group.go index 062c96074..86c078658 100644 --- a/jobspec/parse_group.go +++ b/jobspec/parse_group.go @@ -295,41 +295,17 @@ func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error { } func parseVolumes(out *map[string]*api.VolumeRequest, list *ast.ObjectList) error { - volumes := make(map[string]*api.VolumeRequest, len(list.Items)) + if err := hcl.DecodeObject(out, list); err != nil { + return err + } - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - valid := []string{ - "type", - "read_only", - "hidden", - "source", - } - if err := helper.CheckHCLKeys(item.Val, valid); err != nil { - return err - } - - var m map[string]interface{} - if err := hcl.DecodeObject(&m, item.Val); err != nil { - return err - } - - var result api.VolumeRequest - dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - WeaklyTypedInput: true, - Result: &result, - }) + for k, v := range *out { + err := helper.UnusedKeys(v) if err != nil { return err } - if err := dec.Decode(m); err != nil { - return err - } - - result.Name = n - volumes[n] = &result + // This is supported by `hcl:",key"`, but that only works if we start at the + // parent ast.ObjectItem + v.Name = k } - *out = volumes return nil }
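The effect of the parseVolumes rewrite is easiest to see end to end: the whole volume block now decodes directly into api.VolumeRequest, unknown keys surface through UnusedKeys, and the block label is backfilled as Name. A minimal sketch using a stanza borrowed from the basic.hcl fixture below (assumes this branch's jobspec and api packages):

package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/nomad/jobspec"
)

func main() {
	job, err := jobspec.Parse(strings.NewReader(`
job "example" {
  group "cache" {
    volume "bar" {
      type   = "csi"
      source = "bar-vol"

      mount_options {
        fs_type = "ext4"
      }
    }

    task "redis" {
      driver = "docker"
    }
  }
}`))
	if err != nil {
		panic(err)
	}

	vol := job.TaskGroups[0].Volumes["bar"]
	// Name is backfilled from the block label by parseVolumes
	fmt.Println(vol.Name, vol.Type, vol.Source, vol.MountOptions.FSType)
}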
mapstructure.WeakDecode(m, &cfg); err != nil { + return nil, err + } + + t.CSIPluginConfig = &cfg + } + // If we have config, then parse that if o := listVal.Filter("config"); len(o.Items) > 0 { for _, o := range o.Elem().Items { diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 13639a1b2..924d3ab71 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -117,11 +117,32 @@ func TestParse(t *testing.T) { Operand: "=", }, }, - Volumes: map[string]*api.VolumeRequest{ "foo": { - Name: "foo", - Type: "host", + Name: "foo", + Type: "host", + Source: "/path", + ExtraKeysHCL: nil, + }, + "bar": { + Name: "bar", + Type: "csi", + Source: "bar-vol", + MountOptions: &api.CSIMountOptions{ + FSType: "ext4", + }, + ExtraKeysHCL: nil, + }, + "baz": { + Name: "baz", + Type: "csi", + Source: "bar-vol", + MountOptions: &api.CSIMountOptions{ + MountFlags: []string{ + "ro", + }, + }, + ExtraKeysHCL: nil, }, }, Affinities: []*api.Affinity{ @@ -569,6 +590,30 @@ func TestParse(t *testing.T) { }, false, }, + { + "csi-plugin.hcl", + &api.Job{ + ID: helper.StringToPtr("binstore-storagelocker"), + Name: helper.StringToPtr("binstore-storagelocker"), + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("binsl"), + Tasks: []*api.Task{ + { + Name: "binstore", + Driver: "docker", + CSIPluginConfig: &api.TaskCSIPluginConfig{ + ID: "org.hashicorp.csi", + Type: api.CSIPluginTypeMonolith, + MountDir: "/csi/test", + }, + }, + }, + }, + }, + }, + false, + }, { "service-check-initial-status.hcl", &api.Job{ diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index 1f9d3e73d..feae6667f 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -71,7 +71,26 @@ job "binstore-storagelocker" { count = 5 volume "foo" { - type = "host" + type = "host" + source = "/path" + } + + volume "bar" { + type = "csi" + source = "bar-vol" + + mount_options { + fs_type = "ext4" + } + } + + volume "baz" { + type = "csi" + source = "bar-vol" + + mount_options { + mount_flags = ["ro"] + } } restart { diff --git a/jobspec/test-fixtures/csi-plugin.hcl b/jobspec/test-fixtures/csi-plugin.hcl new file mode 100644 index 000000000..b879da184 --- /dev/null +++ b/jobspec/test-fixtures/csi-plugin.hcl @@ -0,0 +1,13 @@ +job "binstore-storagelocker" { + group "binsl" { + task "binstore" { + driver = "docker" + + csi_plugin { + id = "org.hashicorp.csi" + type = "monolith" + mount_dir = "/csi/test" + } + } + } +} diff --git a/nomad/client_csi_endpoint.go b/nomad/client_csi_endpoint.go new file mode 100644 index 000000000..18f60b361 --- /dev/null +++ b/nomad/client_csi_endpoint.go @@ -0,0 +1,118 @@ +package nomad + +import ( + "errors" + "fmt" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + cstructs "github.com/hashicorp/nomad/client/structs" +) + +// ClientCSIController is used to forward RPC requests to the targed Nomad client's +// CSIController endpoint. +type ClientCSIController struct { + srv *Server + logger log.Logger +} + +func (a *ClientCSIController) AttachVolume(args *cstructs.ClientCSIControllerAttachVolumeRequest, reply *cstructs.ClientCSIControllerAttachVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "attach_volume"}, time.Now()) + + // Verify the arguments. 
+ if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.AttachVolume", args, reply) + } + + // Make the RPC + err = NodeRpc(state.Session, "CSIController.AttachVolume", args, reply) + if err != nil { + return fmt.Errorf("attach volume: %v", err) + } + return nil +} + +func (a *ClientCSIController) ValidateVolume(args *cstructs.ClientCSIControllerValidateVolumeRequest, reply *cstructs.ClientCSIControllerValidateVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "validate_volume"}, time.Now()) + + // Verify the arguments. + if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.ValidateVolume", args, reply) + } + + // Make the RPC + err = NodeRpc(state.Session, "CSIController.ValidateVolume", args, reply) + if err != nil { + return fmt.Errorf("validate volume: %v", err) + } + return nil +} + +func (a *ClientCSIController) DetachVolume(args *cstructs.ClientCSIControllerDetachVolumeRequest, reply *cstructs.ClientCSIControllerDetachVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "detach_volume"}, time.Now()) + + // Verify the arguments. 
+ if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.DetachVolume", args, reply) + } + + // Make the RPC + err = NodeRpc(state.Session, "CSIController.DetachVolume", args, reply) + if err != nil { + return fmt.Errorf("detach volume: %v", err) + } + return nil + +} diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go new file mode 100644 index 000000000..56e52cd06 --- /dev/null +++ b/nomad/client_csi_endpoint_test.go @@ -0,0 +1,169 @@ +package nomad + +import ( + "testing" + + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/client" + "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" +) + +func TestClientCSIController_AttachVolume_Local(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s, cleanupS := TestServer(t, nil) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s.config.RPCAddr.String()} + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + req := &cstructs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.AttachVolume", req, &resp) + require.NotNil(err) + // Should receive an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} + +func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS1() + s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS2() + TestJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + testutil.WaitForLeader(t, s2.RPC) + codec := rpcClient(t, s2) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s2.config.RPCAddr.String()} + c.GCDiskUsageThreshold = 100.0 + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s2.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + // Force remove the connection locally in case it exists + s1.nodeConnsLock.Lock() + delete(s1.nodeConns, c.NodeID()) + s1.nodeConnsLock.Unlock() + + req := &cstructs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.AttachVolume", req,
&resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} + +func TestClientCSIController_DetachVolume_Local(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s, cleanupS := TestServer(t, nil) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s.config.RPCAddr.String()} + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + req := &cstructs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.DetachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} + +func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS1() + s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS2() + TestJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + testutil.WaitForLeader(t, s2.RPC) + codec := rpcClient(t, s2) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s2.config.RPCAddr.String()} + c.GCDiskUsageThreshold = 100.0 + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s2.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + // Force remove the connection locally in case it exists + s1.nodeConnsLock.Lock() + delete(s1.nodeConns, c.NodeID()) + s1.nodeConnsLock.Unlock() + + req := &cstructs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.DetachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} diff --git a/nomad/client_rpc.go b/nomad/client_rpc.go index ca8db2336..b17c3d39f 100644 --- a/nomad/client_rpc.go +++ b/nomad/client_rpc.go @@ -219,14 +219,14 @@ func NodeRpc(session *yamux.Session, method string, args, reply interface{}) err // Open a new session stream, err := session.Open() if err != nil { - return err + return fmt.Errorf("session open: %v", err) } defer stream.Close() // Write the RpcNomad byte to set the mode if _, err := stream.Write([]byte{byte(pool.RpcNomad)}); err != nil { stream.Close() - return err + return fmt.Errorf("set mode: %v", err) } // Make the RPC diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 1fb7330ea..85781c890 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -3,10 +3,12 @@ package nomad import ( "fmt" "math" + "strings" "time" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" + multierror "github.com/hashicorp/go-multierror" version 
"github.com/hashicorp/go-version" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -41,7 +43,8 @@ func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Schedule // Process is used to implement the scheduler.Scheduler interface func (c *CoreScheduler) Process(eval *structs.Evaluation) error { - switch eval.JobID { + job := strings.Split(eval.JobID, ":") // extra data can be smuggled in w/ JobID + switch job[0] { case structs.CoreJobEvalGC: return c.evalGC(eval) case structs.CoreJobNodeGC: @@ -50,6 +53,8 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error { return c.jobGC(eval) case structs.CoreJobDeploymentGC: return c.deploymentGC(eval) + case structs.CoreJobCSIVolumeClaimGC: + return c.csiVolumeClaimGC(eval) case structs.CoreJobForceGC: return c.forceGC(eval) default: @@ -141,6 +146,7 @@ OUTER: gcAlloc = append(gcAlloc, jobAlloc...) gcEval = append(gcEval, jobEval...) } + } // Fast-path the nothing case @@ -150,6 +156,11 @@ OUTER: c.logger.Debug("job GC found eligible objects", "jobs", len(gcJob), "evals", len(gcEval), "allocs", len(gcAlloc)) + // Clean up any outstanding volume claims + if err := c.volumeClaimReap(gcJob, eval.LeaderACL); err != nil { + return err + } + // Reap the evals and allocs if err := c.evalReap(gcEval, gcAlloc); err != nil { return err @@ -703,3 +714,124 @@ func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime time.Time, return timeDiff > interval.Nanoseconds() } + +// csiVolumeClaimGC is used to garbage collect CSI volume claims +func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error { + c.logger.Trace("garbage collecting unclaimed CSI volume claims") + + // JobID smuggled in with the eval's own JobID + var jobID string + evalJobID := strings.Split(eval.JobID, ":") + if len(evalJobID) != 2 { + c.logger.Error("volume gc called without jobID") + return nil + } + + jobID = evalJobID[1] + job, err := c.srv.State().JobByID(nil, eval.Namespace, jobID) + if err != nil || job == nil { + c.logger.Trace( + "cannot find job to perform volume claim GC. it may have been garbage collected", + "job", jobID) + return nil + } + c.volumeClaimReap([]*structs.Job{job}, eval.LeaderACL) + return nil +} + +// volumeClaimReap contacts the leader and releases volume claims from +// terminal allocs +func (c *CoreScheduler) volumeClaimReap(jobs []*structs.Job, leaderACL string) error { + ws := memdb.NewWatchSet() + var result *multierror.Error + + for _, job := range jobs { + c.logger.Trace("garbage collecting unclaimed CSI volume claims for job", "job", job.ID) + for _, taskGroup := range job.TaskGroups { + for _, tgVolume := range taskGroup.Volumes { + if tgVolume.Type != structs.VolumeTypeCSI { + continue // filter to just CSI volumes + } + volID := tgVolume.Source + vol, err := c.srv.State().CSIVolumeByID(ws, job.Namespace, volID) + if err != nil { + result = multierror.Append(result, err) + continue + } + if vol == nil { + c.logger.Trace("cannot find volume to be GC'd. it may have been deregistered", + "volume", volID) + continue + } + vol, err = c.srv.State().CSIVolumeDenormalize(ws, vol) + if err != nil { + result = multierror.Append(result, err) + continue + } + + gcAllocs := []string{} // alloc IDs + claimedNodes := map[string]struct{}{} + knownNodes := []string{} + + collectFunc := func(allocs map[string]*structs.Allocation) { + for _, alloc := range allocs { + // we call denormalize on the volume above to populate + // Allocation pointers. 
+
+// volumeClaimReap contacts the leader and releases volume claims from
+// terminal allocs
+func (c *CoreScheduler) volumeClaimReap(jobs []*structs.Job, leaderACL string) error {
+	ws := memdb.NewWatchSet()
+	var result *multierror.Error
+
+	for _, job := range jobs {
+		c.logger.Trace("garbage collecting unclaimed CSI volume claims for job", "job", job.ID)
+		for _, taskGroup := range job.TaskGroups {
+			for _, tgVolume := range taskGroup.Volumes {
+				if tgVolume.Type != structs.VolumeTypeCSI {
+					continue // filter to just CSI volumes
+				}
+				volID := tgVolume.Source
+				vol, err := c.srv.State().CSIVolumeByID(ws, job.Namespace, volID)
+				if err != nil {
+					result = multierror.Append(result, err)
+					continue
+				}
+				if vol == nil {
+					c.logger.Trace("cannot find volume to be GC'd. it may have been deregistered",
+						"volume", volID)
+					continue
+				}
+				vol, err = c.srv.State().CSIVolumeDenormalize(ws, vol)
+				if err != nil {
+					result = multierror.Append(result, err)
+					continue
+				}
+
+				gcAllocs := []string{} // alloc IDs
+				claimedNodes := map[string]struct{}{}
+				knownNodes := []string{}
+
+				collectFunc := func(allocs map[string]*structs.Allocation) {
+					for _, alloc := range allocs {
+						// we call denormalize on the volume above to populate
+						// Allocation pointers. But the alloc might have been
+						// garbage collected concurrently, so if the alloc is
+						// still nil we can safely skip it.
+						if alloc == nil {
+							continue
+						}
+						knownNodes = append(knownNodes, alloc.NodeID)
+						if !alloc.Terminated() {
+							// if there are any unterminated allocs, we
+							// don't want to unpublish the volume, just
+							// release the alloc's claim
+							claimedNodes[alloc.NodeID] = struct{}{}
+							continue
+						}
+						gcAllocs = append(gcAllocs, alloc.ID)
+					}
+				}
+
+				collectFunc(vol.WriteAllocs)
+				collectFunc(vol.ReadAllocs)
+
+				req := &structs.CSIVolumeClaimRequest{
+					VolumeID:     volID,
+					AllocationID: "", // controller unpublish never uses this field
+					Claim:        structs.CSIVolumeClaimRelease,
+					WriteRequest: structs.WriteRequest{
+						Region:    job.Region,
+						Namespace: job.Namespace,
+						AuthToken: leaderACL,
+					},
+				}
+
+				// we only emit the controller unpublish if no other allocs
+				// on the node need it, but we also only want to make this
+				// call at most once per node
+				for _, node := range knownNodes {
+					if _, isClaimed := claimedNodes[node]; isClaimed {
+						continue
+					}
+					err = c.srv.controllerUnpublishVolume(req, node)
+					if err != nil {
+						result = multierror.Append(result, err)
+						continue
+					}
+				}
+
+				for _, allocID := range gcAllocs {
+					req.AllocationID = allocID
+					err = c.srv.RPC("CSIVolume.Claim", req, &structs.CSIVolumeClaimResponse{})
+					if err != nil {
+						c.logger.Error("volume claim release failed", "error", err)
+						result = multierror.Append(result, err)
+					}
+				}
+			}
+		}
+	}
+	return result.ErrorOrNil()
+}
diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go
index 70b500a82..773804e24 100644
--- a/nomad/core_sched_test.go
+++ b/nomad/core_sched_test.go
@@ -2193,3 +2193,241 @@ func TestAllocation_GCEligible(t *testing.T) {
 	alloc.ClientStatus = structs.AllocClientStatusComplete
 	require.True(allocGCEligible(alloc, nil, time.Now(), 1000))
 }
+
+func TestCSI_GCVolumeClaims(t *testing.T) {
+	t.Parallel()
+	srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 })
+	defer shutdown()
+	testutil.WaitForLeader(t, srv.RPC)
+
+	state := srv.fsm.State()
+	ws := memdb.NewWatchSet()
+
+	// Create a client node, plugin, and volume
+	node := mock.Node()
+	node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version
+	node.CSINodePlugins = map[string]*structs.CSIInfo{
+		"csi-plugin-example": {PluginID: "csi-plugin-example",
+			Healthy:  true,
+			NodeInfo: &structs.CSINodeInfo{},
+		},
+	}
+	err := state.UpsertNode(99, node)
+	require.NoError(t, err)
+	volId0 := uuid.Generate()
+	ns := structs.DefaultNamespace
+	vols := []*structs.CSIVolume{{
+		ID:             volId0,
+		Namespace:      ns,
+		PluginID:       "csi-plugin-example",
+		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
+		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+	}}
+	err = state.CSIVolumeRegister(100, vols)
+	require.NoError(t, err)
+	vol, err := state.CSIVolumeByID(ws, ns, volId0)
+	require.NoError(t, err)
+	require.Len(t, vol.ReadAllocs, 0)
+	require.Len(t, vol.WriteAllocs, 0)
+
+	// Create a job with 2 allocations
+	job := mock.Job()
+	job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
+		"_": {
+			Name:     "someVolume",
+			Type:     structs.VolumeTypeCSI,
+			Source:   volId0,
+			ReadOnly: false,
+		},
+	}
+	err = state.UpsertJob(101, job)
+	require.NoError(t, err)
+
+	alloc1 := mock.Alloc()
+	alloc1.JobID = job.ID
+	alloc1.NodeID = node.ID
+	err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID))
+	require.NoError(t, err)
+	alloc1.TaskGroup = job.TaskGroups[0].Name
+
+	alloc2 :=
mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, ns, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update the 1st alloc as failed/terminated + alloc1.ClientStatus = structs.AllocClientStatusFailed + err = state.UpdateAllocsFromClient(107, []*structs.Allocation{alloc1}) + require.NoError(t, err) + + // Create the GC eval we'd get from Node.UpdateAlloc + now := time.Now().UTC() + eval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: job.Namespace, + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerAllocStop, + JobID: structs.CoreJobCSIVolumeClaimGC + ":" + job.ID, + LeaderACL: srv.getLeaderAcl(), + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), + } + + // Process the eval + snap, err := state.Snapshot() + require.NoError(t, err) + core := NewCoreScheduler(srv, snap) + err = core.Process(eval) + require.NoError(t, err) + + // Verify the claim was released + vol, err = state.CSIVolumeByID(ws, ns, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 0) +} + +func TestCSI_GCVolumeClaims_Controller(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + // Create a client node, plugin, and volume + node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": { + PluginID: "csi-plugin-example", + Healthy: true, + RequiresControllerPlugin: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": { + PluginID: "csi-plugin-example", + Healthy: true, + RequiresControllerPlugin: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsAttachDetach: true, + SupportsListVolumes: true, + SupportsListVolumesAttachedNodes: false, + }, + }, + } + err := state.UpsertNode(99, node) + require.NoError(t, err) + volId0 := uuid.Generate() + ns := structs.DefaultNamespace + vols := []*structs.CSIVolume{{ + ID: volId0, + Namespace: ns, + PluginID: "csi-plugin-example", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + err = state.CSIVolumeRegister(100, vols) + require.NoError(t, err) + vol, err := state.CSIVolumeByID(ws, ns, volId0) + + require.NoError(t, err) + require.True(t, vol.ControllerRequired) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) + + // Create a job with 2 allocations + job := mock.Job() + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + "_": { + Name: "someVolume", + Type: 
structs.VolumeTypeCSI, + Source: volId0, + ReadOnly: false, + }, + } + err = state.UpsertJob(101, job) + require.NoError(t, err) + + alloc1 := mock.Alloc() + alloc1.JobID = job.ID + alloc1.NodeID = node.ID + err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID)) + require.NoError(t, err) + alloc1.TaskGroup = job.TaskGroups[0].Name + + alloc2 := mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, ns, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update both allocs as failed/terminated + alloc1.ClientStatus = structs.AllocClientStatusFailed + alloc2.ClientStatus = structs.AllocClientStatusFailed + err = state.UpdateAllocsFromClient(107, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Create the GC eval we'd get from Node.UpdateAlloc + now := time.Now().UTC() + eval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: job.Namespace, + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerAllocStop, + JobID: structs.CoreJobCSIVolumeClaimGC + ":" + job.ID, + LeaderACL: srv.getLeaderAcl(), + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), + } + + // Process the eval + snap, err := state.Snapshot() + require.NoError(t, err) + core := NewCoreScheduler(srv, snap) + err = core.Process(eval) + require.NoError(t, err) + + // Verify both claims were released + vol, err = state.CSIVolumeByID(ws, ns, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) +} diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go new file mode 100644 index 000000000..b0968c615 --- /dev/null +++ b/nomad/csi_endpoint.go @@ -0,0 +1,678 @@ +package nomad + +import ( + "fmt" + "math/rand" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + memdb "github.com/hashicorp/go-memdb" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/acl" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/structs" +) + +// CSIVolume wraps the structs.CSIVolume with request data and server context +type CSIVolume struct { + srv *Server + logger log.Logger +} + +// QueryACLObj looks up the ACL token in the request and returns the acl.ACL object +// - fallback to node secret ids +func (srv *Server) QueryACLObj(args *structs.QueryOptions, allowNodeAccess bool) (*acl.ACL, error) { + // Lookup the token + aclObj, err := srv.ResolveToken(args.AuthToken) + if err != nil { + // If ResolveToken had an unexpected error return that + if !structs.IsErrTokenNotFound(err) { + return nil, err + } + + // If we don't allow access to this endpoint from Nodes, then return token + // not found. 
+	if !allowNodeAccess {
+		return nil, structs.ErrTokenNotFound
+	}
+
+	ws := memdb.NewWatchSet()
+	// Attempt to lookup AuthToken as a Node.SecretID since nodes may call
+	// this endpoint and don't have an ACL token.
+	node, stateErr := srv.fsm.State().NodeBySecretID(ws, args.AuthToken)
+	if stateErr != nil {
+		// Return the original ResolveToken error with this err
+		var merr multierror.Error
+		merr.Errors = append(merr.Errors, err, stateErr)
+		return nil, merr.ErrorOrNil()
+	}
+
+	// We did not find a Node for this ID, so return Token Not Found.
+	if node == nil {
+		return nil, structs.ErrTokenNotFound
+	}
+	}
+
+	// Return either the user's aclObj, or nil if ACLs are disabled.
+	return aclObj, nil
+}
+
+// WriteACLObj calls QueryACLObj for a WriteRequest
+func (srv *Server) WriteACLObj(args *structs.WriteRequest, allowNodeAccess bool) (*acl.ACL, error) {
+	opts := &structs.QueryOptions{
+		Region:    args.RequestRegion(),
+		Namespace: args.RequestNamespace(),
+		AuthToken: args.AuthToken,
+	}
+	return srv.QueryACLObj(opts, allowNodeAccess)
+}
+
+const (
+	csiVolumeTable = "csi_volumes"
+	csiPluginTable = "csi_plugins"
+)
+
+// replySetIndex sets the reply with the last index that modified the table
+func (srv *Server) replySetIndex(table string, reply *structs.QueryMeta) error {
+	s := srv.fsm.State()
+
+	index, err := s.Index(table)
+	if err != nil {
+		return err
+	}
+	reply.Index = index
+
+	// Set the query response
+	srv.setQueryMeta(reply)
+	return nil
+}
+
+// List replies with CSIVolumes, filtered by ACL access
+func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIVolumeListResponse) error {
+	if done, err := v.srv.forward("CSIVolume.List", args, args, reply); done {
+		return err
+	}
+
+	allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume,
+		acl.NamespaceCapabilityCSIReadVolume,
+		acl.NamespaceCapabilityCSIMountVolume,
+		acl.NamespaceCapabilityListJobs)
+	aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false)
+	if err != nil {
+		return err
+	}
+
+	if !allowVolume(aclObj, args.RequestNamespace()) {
+		return structs.ErrPermissionDenied
+	}
+
+	metricsStart := time.Now()
+	defer metrics.MeasureSince([]string{"nomad", "volume", "list"}, metricsStart)
+
+	ns := args.RequestNamespace()
+	opts := blockingOptions{
+		queryOpts: &args.QueryOptions,
+		queryMeta: &reply.QueryMeta,
+		run: func(ws memdb.WatchSet, state *state.StateStore) error {
+			// Query all volumes
+			var err error
+			var iter memdb.ResultIterator
+
+			if args.NodeID != "" {
+				iter, err = state.CSIVolumesByNodeID(ws, ns, args.NodeID)
+			} else if args.PluginID != "" {
+				iter, err = state.CSIVolumesByPluginID(ws, ns, args.PluginID)
+			} else {
+				iter, err = state.CSIVolumesByNamespace(ws, ns)
+			}
+
+			if err != nil {
+				return err
+			}
+
+			// Collect results, filter by ACL access
+			var vs []*structs.CSIVolListStub
+
+			for {
+				raw := iter.Next()
+				if raw == nil {
+					break
+				}
+
+				vol := raw.(*structs.CSIVolume)
+				vol, err := state.CSIVolumeDenormalizePlugins(ws, vol.Copy())
+				if err != nil {
+					return err
+				}
+
+				// Filter (possibly again) on PluginID to handle passing both NodeID and PluginID
+				if args.PluginID != "" && args.PluginID != vol.PluginID {
+					continue
+				}
+
+				vs = append(vs, vol.Stub())
+			}
+			reply.Volumes = vs
+			return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta)
+		}}
+	return v.srv.blockingRPC(&opts)
+}
+
+// Get fetches detailed information about a specific volume
+func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVolumeGetResponse)
error { + if done, err := v.srv.forward("CSIVolume.Get", args, args, reply); done { + return err + } + + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityCSIMountVolume, + acl.NamespaceCapabilityReadJob) + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, true) + if err != nil { + return err + } + + ns := args.RequestNamespace() + if !allowCSIAccess(aclObj, ns) { + return structs.ErrPermissionDenied + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "get"}, metricsStart) + + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + vol, err := state.CSIVolumeByID(ws, ns, args.ID) + if err != nil { + return err + } + if vol != nil { + vol, err = state.CSIVolumeDenormalize(ws, vol) + } + if err != nil { + return err + } + + reply.Volume = vol + return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) + }} + return v.srv.blockingRPC(&opts) +} + +func (srv *Server) pluginValidateVolume(req *structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume) (*structs.CSIPlugin, error) { + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + plugin, err := state.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return nil, err + } + if plugin == nil { + return nil, fmt.Errorf("no CSI plugin named: %s could be found", vol.PluginID) + } + + vol.Provider = plugin.Provider + vol.ProviderVersion = plugin.Version + return plugin, nil +} + +func (srv *Server) controllerValidateVolume(req *structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume, plugin *structs.CSIPlugin) error { + + if !plugin.ControllerRequired { + // The plugin does not require a controller, so for now we won't do any + // further validation of the volume. + return nil + } + + // The plugin requires a controller. Now we do some validation of the Volume + // to ensure that the registered capabilities are valid and that the volume + // exists. + + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plugin) + if err != nil || nodeID == "" { + return err + } + + method := "ClientCSIController.ValidateVolume" + cReq := &cstructs.ClientCSIControllerValidateVolumeRequest{ + VolumeID: vol.RemoteID(), + AttachmentMode: vol.AttachmentMode, + AccessMode: vol.AccessMode, + } + cReq.PluginID = plugin.ID + cReq.ControllerNodeID = nodeID + cResp := &cstructs.ClientCSIControllerValidateVolumeResponse{} + + return srv.RPC(method, cReq, cResp) +} + +// Register registers a new volume +func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *structs.CSIVolumeRegisterResponse) error { + if done, err := v.srv.forward("CSIVolume.Register", args, args, reply); done { + return err + } + + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIWriteVolume) + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false) + if err != nil { + return err + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "register"}, metricsStart) + + if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() { + return structs.ErrPermissionDenied + } + + // This is the only namespace we ACL checked, force all the volumes to use it. 
+	// We also validate that the plugin exists for each volume, and validate the
+	// capabilities when the plugin has a controller.
+	for _, vol := range args.Volumes {
+		vol.Namespace = args.RequestNamespace()
+		if err = vol.Validate(); err != nil {
+			return err
+		}
+
+		plugin, err := v.srv.pluginValidateVolume(args, vol)
+		if err != nil {
+			return err
+		}
+		if err := v.srv.controllerValidateVolume(args, vol, plugin); err != nil {
+			return err
+		}
+	}
+
+	resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args)
+	if err != nil {
+		v.logger.Error("csi raft apply failed", "error", err, "method", "register")
+		return err
+	}
+	if respErr, ok := resp.(error); ok {
+		return respErr
+	}
+
+	reply.Index = index
+	v.srv.setQueryMeta(&reply.QueryMeta)
+	return nil
+}
+
+// Deregister removes a set of volumes
+func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply *structs.CSIVolumeDeregisterResponse) error {
+	if done, err := v.srv.forward("CSIVolume.Deregister", args, args, reply); done {
+		return err
+	}
+
+	allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIWriteVolume)
+	aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false)
+	if err != nil {
+		return err
+	}
+
+	metricsStart := time.Now()
+	defer metrics.MeasureSince([]string{"nomad", "volume", "deregister"}, metricsStart)
+
+	ns := args.RequestNamespace()
+	if !allowVolume(aclObj, ns) {
+		return structs.ErrPermissionDenied
+	}
+
+	resp, index, err := v.srv.raftApply(structs.CSIVolumeDeregisterRequestType, args)
+	if err != nil {
+		v.logger.Error("csi raft apply failed", "error", err, "method", "deregister")
+		return err
+	}
+	if respErr, ok := resp.(error); ok {
+		return respErr
+	}
+
+	reply.Index = index
+	v.srv.setQueryMeta(&reply.QueryMeta)
+	return nil
+}
+
+// Claim submits a change to a volume claim
+func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CSIVolumeClaimResponse) error {
+	if done, err := v.srv.forward("CSIVolume.Claim", args, args, reply); done {
+		return err
+	}
+
+	allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIMountVolume)
+	aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, true)
+	if err != nil {
+		return err
+	}
+
+	metricsStart := time.Now()
+	defer metrics.MeasureSince([]string{"nomad", "volume", "claim"}, metricsStart)
+
+	if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() {
+		return structs.ErrPermissionDenied
+	}
+
+	// if this is a new claim, add a Volume and PublishContext from the
+	// controller (if any) to the reply
+	if args.Claim != structs.CSIVolumeClaimRelease {
+		err = v.srv.controllerPublishVolume(args, reply)
+		if err != nil {
+			return fmt.Errorf("controller publish: %v", err)
+		}
+	}
+
+	resp, index, err := v.srv.raftApply(structs.CSIVolumeClaimRequestType, args)
+	if err != nil {
+		v.logger.Error("csi raft apply failed", "error", err, "method", "claim")
+		return err
+	}
+	if respErr, ok := resp.(error); ok {
+		return respErr
+	}
+
+	reply.Index = index
+	v.srv.setQueryMeta(&reply.QueryMeta)
+	return nil
+}
+
+// allowCSIMount is called on Job register to check mount permission
+func allowCSIMount(aclObj *acl.ACL, namespace string) bool {
+	return aclObj.AllowPluginRead() &&
+		aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityCSIMountVolume)
+}
+
+// CSIPlugin wraps the structs.CSIPlugin with request data and server context
+type CSIPlugin struct {
+	srv    *Server
+	logger log.Logger
+}
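allowCSIMount above is the composite check used at job-registration time: a token needs plugin read access plus the csi-mount-volume namespace capability. A sketch of building such a token in these tests with the mock helpers from this change set (the index and policy name are arbitrary):

	policy := mock.NamespacePolicy(structs.DefaultNamespace, "",
		[]string{acl.NamespaceCapabilityCSIMountVolume}) +
		mock.PluginPolicy("read")
	token := mock.CreatePolicyAndToken(t, state, 1001, "csi-mount", policy)
	// pass token.SecretID as the AuthToken on the request's WriteRequest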
+
+// List replies with CSIPlugins, filtered by ACL access
+func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIPluginListResponse) error {
+	if done, err := v.srv.forward("CSIPlugin.List", args, args, reply); done {
+		return err
+	}
+
+	aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false)
+	if err != nil {
+		return err
+	}
+
+	if !aclObj.AllowPluginList() {
+		return structs.ErrPermissionDenied
+	}
+
+	metricsStart := time.Now()
+	defer metrics.MeasureSince([]string{"nomad", "plugin", "list"}, metricsStart)
+
+	opts := blockingOptions{
+		queryOpts: &args.QueryOptions,
+		queryMeta: &reply.QueryMeta,
+		run: func(ws memdb.WatchSet, state *state.StateStore) error {
+			// Query all plugins
+			iter, err := state.CSIPlugins(ws)
+			if err != nil {
+				return err
+			}
+
+			// Collect results
+			var ps []*structs.CSIPluginListStub
+			for {
+				raw := iter.Next()
+				if raw == nil {
+					break
+				}
+
+				plug := raw.(*structs.CSIPlugin)
+				ps = append(ps, plug.Stub())
+			}
+
+			reply.Plugins = ps
+			return v.srv.replySetIndex(csiPluginTable, &reply.QueryMeta)
+		}}
+	return v.srv.blockingRPC(&opts)
+}
+
+// Get fetches detailed information about a specific plugin
+func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPluginGetResponse) error {
+	if done, err := v.srv.forward("CSIPlugin.Get", args, args, reply); done {
+		return err
+	}
+
+	aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false)
+	if err != nil {
+		return err
+	}
+
+	if !aclObj.AllowPluginRead() {
+		return structs.ErrPermissionDenied
+	}
+
+	withAllocs := aclObj == nil ||
+		aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityReadJob)
+
+	metricsStart := time.Now()
+	defer metrics.MeasureSince([]string{"nomad", "plugin", "get"}, metricsStart)
+
+	opts := blockingOptions{
+		queryOpts: &args.QueryOptions,
+		queryMeta: &reply.QueryMeta,
+		run: func(ws memdb.WatchSet, state *state.StateStore) error {
+			plug, err := state.CSIPluginByID(ws, args.ID)
+			if err != nil {
+				return err
+			}
+
+			if plug == nil {
+				return nil
+			}
+
+			if withAllocs {
+				plug, err = state.CSIPluginDenormalize(ws, plug.Copy())
+				if err != nil {
+					return err
+				}
+
+				// Filter the allocation stubs by our namespace. withAllocs
+				// means we're allowed to read them.
+				var as []*structs.AllocListStub
+				for _, a := range plug.Allocations {
+					if a.Namespace == args.RequestNamespace() {
+						as = append(as, a)
+					}
+				}
+				plug.Allocations = as
+			}
+
+			reply.Plugin = plug
+			return v.srv.replySetIndex(csiPluginTable, &reply.QueryMeta)
+		}}
+	return v.srv.blockingRPC(&opts)
+}
+
+// controllerPublishVolume sends a publish request to the CSI controller
+// plugin associated with a volume, if any.
+func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, resp *structs.CSIVolumeClaimResponse) error {
+	plug, vol, err := srv.volAndPluginLookup(req.RequestNamespace(), req.VolumeID)
+	if err != nil {
+		return err
+	}
+
+	// Set the Response volume from the lookup
+	resp.Volume = vol
+
+	// Validate the existence of the allocation, regardless of whether we need it
+	// now.
+	state := srv.fsm.State()
+	ws := memdb.NewWatchSet()
+	alloc, err := state.AllocByID(ws, req.AllocationID)
+	if err != nil {
+		return err
+	}
+	if alloc == nil {
+		return fmt.Errorf("%s: %s", structs.ErrUnknownAllocationPrefix, req.AllocationID)
+	}
+
+	// if no plugin was returned then controller validation is not required.
+	// Here we can return nil.
+	if plug == nil {
+		return nil
+	}
+
+	// plugin IDs are not scoped to region/DC but volumes are.
+	// so any node we get for a controller is already in the same region/DC
+	// for the volume.
+ nodeID, err := srv.nodeForControllerPlugin(plug) + if err != nil || nodeID == "" { + return err + } + + targetNode, err := state.NodeByID(ws, alloc.NodeID) + if err != nil { + return err + } + if targetNode == nil { + return fmt.Errorf("%s: %s", structs.ErrUnknownNodePrefix, alloc.NodeID) + } + targetCSIInfo, ok := targetNode.CSINodePlugins[plug.ID] + if !ok { + return fmt.Errorf("Failed to find NodeInfo for node: %s", targetNode.ID) + } + + method := "ClientCSIController.AttachVolume" + cReq := &cstructs.ClientCSIControllerAttachVolumeRequest{ + VolumeID: vol.RemoteID(), + ClientCSINodeID: targetCSIInfo.NodeInfo.ID, + AttachmentMode: vol.AttachmentMode, + AccessMode: vol.AccessMode, + ReadOnly: req.Claim == structs.CSIVolumeClaimRead, + } + cReq.PluginID = plug.ID + cReq.ControllerNodeID = nodeID + cResp := &cstructs.ClientCSIControllerAttachVolumeResponse{} + + err = srv.RPC(method, cReq, cResp) + if err != nil { + return fmt.Errorf("attach volume: %v", err) + } + resp.PublishContext = cResp.PublishContext + return nil +} + +// controllerUnpublishVolume sends an unpublish request to the CSI +// controller plugin associated with a volume, if any. +// TODO: the only caller of this won't have an alloc pointer handy, should it be its own request arg type? +func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, targetNomadNodeID string) error { + plug, vol, err := srv.volAndPluginLookup(req.RequestNamespace(), req.VolumeID) + if plug == nil || vol == nil || err != nil { + return err // possibly nil if no controller required + } + + ws := memdb.NewWatchSet() + state := srv.State() + + targetNode, err := state.NodeByID(ws, targetNomadNodeID) + if err != nil { + return err + } + if targetNode == nil { + return fmt.Errorf("%s: %s", structs.ErrUnknownNodePrefix, targetNomadNodeID) + } + targetCSIInfo, ok := targetNode.CSINodePlugins[plug.ID] + if !ok { + return fmt.Errorf("Failed to find NodeInfo for node: %s", targetNode.ID) + } + + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plug) + if err != nil || nodeID == "" { + return err + } + + method := "ClientCSIController.DetachVolume" + cReq := &cstructs.ClientCSIControllerDetachVolumeRequest{ + VolumeID: vol.RemoteID(), + ClientCSINodeID: targetCSIInfo.NodeInfo.ID, + } + cReq.PluginID = plug.ID + cReq.ControllerNodeID = nodeID + return srv.RPC(method, cReq, &cstructs.ClientCSIControllerDetachVolumeResponse{}) +} + +func (srv *Server) volAndPluginLookup(namespace, volID string) (*structs.CSIPlugin, *structs.CSIVolume, error) { + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + vol, err := state.CSIVolumeByID(ws, namespace, volID) + if err != nil { + return nil, nil, err + } + if vol == nil { + return nil, nil, fmt.Errorf("volume not found: %s", volID) + } + if !vol.ControllerRequired { + return nil, vol, nil + } + + // note: we do this same lookup in CSIVolumeByID but then throw + // away the pointer to the plugin rather than attaching it to + // the volume so we have to do it again here. + plug, err := state.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return nil, nil, err + } + if plug == nil { + return nil, nil, fmt.Errorf("plugin not found: %s", vol.PluginID) + } + return plug, vol, nil +} + +// nodeForControllerPlugin returns the node ID for a random controller +// to load-balance long-blocking RPCs across client nodes. 
+func (srv *Server) nodeForControllerPlugin(plugin *structs.CSIPlugin) (string, error) {
+	count := len(plugin.Controllers)
+	if count == 0 {
+		return "", fmt.Errorf("no controllers available for plugin %q", plugin.ID)
+	}
+	snap, err := srv.fsm.State().Snapshot()
+	if err != nil {
+		return "", err
+	}
+
+	// Iterating over maps is "random" but unspecified, and isn't particularly
+	// random with small maps, so it's not well-suited for load balancing.
+	// Instead, we shuffle the keys and iterate over them.
+	clientIDs := make([]string, 0, count)
+	for clientID := range plugin.Controllers {
+		clientIDs = append(clientIDs, clientID)
+	}
+	rand.Shuffle(count, func(i, j int) {
+		clientIDs[i], clientIDs[j] = clientIDs[j], clientIDs[i]
+	})
+
+	for _, clientID := range clientIDs {
+		controller := plugin.Controllers[clientID]
+		if !controller.IsController() {
+			// we don't have separate types for CSIInfo depending on
+			// whether it's a controller or node. this error shouldn't
+			// make it to production but is to aid developers during
+			// development
+			err = fmt.Errorf("plugin is not a controller")
+			continue
+		}
+		_, err = getNodeForRpc(snap, clientID)
+		if err != nil {
+			continue
+		}
+		return clientID, nil
+	}
+
+	return "", err
+}
diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go
new file mode 100644
index 000000000..eec6ffc8a
--- /dev/null
+++ b/nomad/csi_endpoint_test.go
@@ -0,0 +1,728 @@
+package nomad
+
+import (
+	"fmt"
+	"testing"
+
+	memdb "github.com/hashicorp/go-memdb"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
+	"github.com/hashicorp/nomad/acl"
+	"github.com/hashicorp/nomad/helper/uuid"
+	"github.com/hashicorp/nomad/nomad/mock"
+	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/hashicorp/nomad/testutil"
+	"github.com/stretchr/testify/require"
+)
+
+func TestCSIVolumeEndpoint_Get(t *testing.T) {
+	t.Parallel()
+	srv, shutdown := TestServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+	defer shutdown()
+	testutil.WaitForLeader(t, srv.RPC)
+
+	ns := structs.DefaultNamespace
+
+	state := srv.fsm.State()
+
+	codec := rpcClient(t, srv)
+
+	id0 := uuid.Generate()
+
+	// Create the volume
+	vols := []*structs.CSIVolume{{
+		ID:             id0,
+		Namespace:      ns,
+		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
+		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+		PluginID:       "minnie",
+	}}
+	err := state.CSIVolumeRegister(999, vols)
+	require.NoError(t, err)
+
+	// Create the register request
+	req := &structs.CSIVolumeGetRequest{
+		ID: id0,
+		QueryOptions: structs.QueryOptions{
+			Region:    "global",
+			Namespace: ns,
+		},
+	}
+
+	var resp structs.CSIVolumeGetResponse
+	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp)
+	require.NoError(t, err)
+	require.Equal(t, uint64(999), resp.Index)
+	require.Equal(t, vols[0].ID, resp.Volume.ID)
+}
+
+func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) {
+	t.Parallel()
+	srv, shutdown := TestServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+	defer shutdown()
+	testutil.WaitForLeader(t, srv.RPC)
+
+	ns := structs.DefaultNamespace
+
+	state := srv.fsm.State()
+	state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
+	srv.config.ACLEnabled = true
+	policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIReadVolume})
+	validToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy)
+
+	codec := rpcClient(t, srv)
+
+	id0 := uuid.Generate()
+
+	// Create the volume
+	vols := []*structs.CSIVolume{{
+		ID:
Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + PluginID: "minnie", + }} + err := state.CSIVolumeRegister(999, vols) + require.NoError(t, err) + + // Create the register request + req := &structs.CSIVolumeGetRequest{ + ID: id0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + + var resp structs.CSIVolumeGetResponse + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp) + require.NoError(t, err) + require.Equal(t, uint64(999), resp.Index) + require.Equal(t, vols[0].ID, resp.Volume.ID) +} + +func TestCSIVolumeEndpoint_Register(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + + state := srv.fsm.State() + codec := rpcClient(t, srv) + + id0 := uuid.Generate() + + // Create the node and plugin + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + // Registers as node plugin that does not require a controller to skip + // the client RPC during registration. + NodeInfo: &structs.CSINodeInfo{}, + }, + } + require.NoError(t, state.UpsertNode(1000, node)) + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: "notTheNamespace", + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + + // Create the register request + req1 := &structs.CSIVolumeRegisterRequest{ + Volumes: vols, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + }, + } + resp1 := &structs.CSIVolumeRegisterResponse{} + err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) + require.NoError(t, err) + require.NotEqual(t, uint64(0), resp1.Index) + + // Get the volume back out + req2 := &structs.CSIVolumeGetRequest{ + ID: id0, + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + } + resp2 := &structs.CSIVolumeGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) + require.NoError(t, err) + require.Equal(t, resp1.Index, resp2.Index) + require.Equal(t, vols[0].ID, resp2.Volume.ID) + + // Registration does not update + req1.Volumes[0].PluginID = "adam" + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) + require.Error(t, err, "exists") + + // Deregistration works + req3 := &structs.CSIVolumeDeregisterRequest{ + VolumeIDs: []string{id0}, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + }, + } + resp3 := &structs.CSIVolumeDeregisterResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Deregister", req3, resp3) + require.NoError(t, err) + + // Volume is missing + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) + require.NoError(t, err) + require.Nil(t, resp2.Volume) +} + +// TestCSIVolumeEndpoint_Claim exercises the VolumeClaim RPC, verifying that claims +// are honored only if the volume exists, the mode is permitted, and the volume +// is schedulable according to its count of claims. 
+func TestCSIVolumeEndpoint_Claim(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := "not-default-ns" + state := srv.fsm.State() + codec := rpcClient(t, srv) + id0 := uuid.Generate() + alloc := mock.BatchAlloc() + + // Create an initial volume claim request; we expect it to fail + // because there's no such volume yet. + claimReq := &structs.CSIVolumeClaimRequest{ + VolumeID: id0, + AllocationID: alloc.ID, + Claim: structs.CSIVolumeClaimWrite, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + }, + } + claimResp := &structs.CSIVolumeClaimResponse{} + err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.EqualError(t, err, fmt.Sprintf("controller publish: volume not found: %s", id0), + "expected 'volume not found' error because volume hasn't yet been created") + + // Create a client node, plugin, alloc, and volume + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + err = state.UpsertNode(1002, node) + require.NoError(t, err) + + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: ns, + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + err = state.CSIVolumeRegister(1003, vols) + require.NoError(t, err) + + // Upsert the job and alloc + alloc.NodeID = node.ID + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + // Now our claim should succeed + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + + // Verify the claim was set + volGetReq := &structs.CSIVolumeGetRequest{ + ID: id0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + }, + } + volGetResp := &structs.CSIVolumeGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 0) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) + + // Make another writer claim for a different alloc + alloc2 := mock.Alloc() + summary = mock.JobSummary(alloc2.JobID) + require.NoError(t, state.UpsertJobSummary(1005, summary)) + require.NoError(t, state.UpsertAllocs(1006, []*structs.Allocation{alloc2})) + claimReq.AllocationID = alloc2.ID + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.EqualError(t, err, "volume max claim reached", + "expected 'volume max claim reached' because we only allow 1 writer") + + // Fix the mode and our claim will succeed + claimReq.Claim = structs.CSIVolumeClaimRead + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + + // Verify the new claim was set + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 1) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) + + // Claim is idempotent + err = 
msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 1) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) +} + +// TestCSIVolumeEndpoint_ClaimWithController exercises the VolumeClaim RPC +// when a controller is required. +func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.ACLEnabled = true + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIMountVolume}) + + mock.PluginPolicy("read") + accessToken := mock.CreatePolicyAndToken(t, state, 1001, "claim", policy) + + codec := rpcClient(t, srv) + id0 := uuid.Generate() + + // Create a client node, plugin, alloc, and volume + node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version + node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsAttachDetach: true, + }, + RequiresControllerPlugin: true, + }, + } + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + err := state.UpsertNode(1002, node) + require.NoError(t, err) + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: ns, + PluginID: "minnie", + ControllerRequired: true, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + err = state.CSIVolumeRegister(1003, vols) + + alloc := mock.BatchAlloc() + alloc.NodeID = node.ID + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + // Make the volume claim + claimReq := &structs.CSIVolumeClaimRequest{ + VolumeID: id0, + AllocationID: alloc.ID, + Claim: structs.CSIVolumeClaimWrite, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: accessToken.SecretID, + }, + } + claimResp := &structs.CSIVolumeClaimResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + // Because the node is not registered + require.EqualError(t, err, "controller publish: attach volume: No path to node") +} + +func TestCSIVolumeEndpoint_List(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + ms := "altNamespace" + + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + codec := rpcClient(t, srv) + + nsPolicy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIReadVolume}) + + mock.PluginPolicy("read") + nsTok := mock.CreatePolicyAndToken(t, state, 1000, "csi-access", nsPolicy) + + id0 := uuid.Generate() + id1 := uuid.Generate() + id2 := uuid.Generate() + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: 
id0, + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + PluginID: "minnie", + }, { + ID: id1, + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + PluginID: "adam", + }, { + ID: id2, + Namespace: ms, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + PluginID: "paddy", + }} + err := state.CSIVolumeRegister(1002, vols) + require.NoError(t, err) + + var resp structs.CSIVolumeListResponse + + // Query everything in the namespace + req := &structs.CSIVolumeListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: nsTok.SecretID, + Namespace: ns, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + + require.Equal(t, uint64(1002), resp.Index) + require.Equal(t, 2, len(resp.Volumes)) + ids := map[string]bool{vols[0].ID: true, vols[1].ID: true} + for _, v := range resp.Volumes { + delete(ids, v.ID) + } + require.Equal(t, 0, len(ids)) + + // Query by PluginID in ns + req = &structs.CSIVolumeListRequest{ + PluginID: "adam", + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + AuthToken: nsTok.SecretID, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + require.Equal(t, 1, len(resp.Volumes)) + require.Equal(t, vols[1].ID, resp.Volumes[0].ID) + + // Query by PluginID in ms + msPolicy := mock.NamespacePolicy(ms, "", []string{acl.NamespaceCapabilityCSIListVolume}) + + mock.PluginPolicy("read") + msTok := mock.CreatePolicyAndToken(t, state, 1003, "csi-access2", msPolicy) + + req = &structs.CSIVolumeListRequest{ + PluginID: "paddy", + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ms, + AuthToken: msTok.SecretID, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + require.Equal(t, 1, len(resp.Volumes)) +} + +func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + deleteNodes := CreateTestCSIPlugin(srv.fsm.State(), "foo") + defer deleteNodes() + + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + codec := rpcClient(t, srv) + + // Get the plugin back out + listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}) + policy := mock.PluginPolicy("read") + listJob + getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy) + + req2 := &structs.CSIPluginGetRequest{ + ID: "foo", + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp2 := &structs.CSIPluginGetResponse{} + err := msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.NoError(t, err) + + // Get requires plugin-read, not plugin-list + lPolicy := mock.PluginPolicy("list") + lTok := mock.CreatePolicyAndToken(t, state, 1003, "plugin-list", lPolicy) + req2.AuthToken = lTok.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.Error(t, err, "Permission denied") + + // List plugins + req3 := &structs.CSIPluginListRequest{ + QueryOptions: 
structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp3 := &structs.CSIPluginListResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + + // ensure that plugin->alloc denormalization does COW correctly + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + + // List allows plugin-list + req3.AuthToken = lTok.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + + // Deregistration works + deleteNodes() + + // Plugin is missing + req2.AuthToken = getToken.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.NoError(t, err) + require.Nil(t, resp2.Plugin) +} + +// TestCSIPluginEndpoint_ACLNamespaceAlloc checks that allocations are filtered by namespace +// when getting plugins, and enforcing that the client has job-read ACL access to the +// namespace of the allocations +func TestCSIPluginEndpoint_ACLNamespaceAlloc(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + state := srv.fsm.State() + + // Setup ACLs + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + codec := rpcClient(t, srv) + listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}) + policy := mock.PluginPolicy("read") + listJob + getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy) + + // Create the plugin and then some allocations to pretend to be the allocs that are + // running the plugin tasks + deleteNodes := CreateTestCSIPlugin(srv.fsm.State(), "foo") + defer deleteNodes() + + plug, _ := state.CSIPluginByID(memdb.NewWatchSet(), "foo") + var allocs []*structs.Allocation + for _, info := range plug.Controllers { + a := mock.Alloc() + a.ID = info.AllocID + allocs = append(allocs, a) + } + for _, info := range plug.Nodes { + a := mock.Alloc() + a.ID = info.AllocID + allocs = append(allocs, a) + } + + require.Equal(t, 3, len(allocs)) + allocs[0].Namespace = "notTheNamespace" + + err := state.UpsertAllocs(1003, allocs) + require.NoError(t, err) + + req := &structs.CSIPluginGetRequest{ + ID: "foo", + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp := &structs.CSIPluginGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req, resp) + require.NoError(t, err) + require.Equal(t, 2, len(resp.Plugin.Allocations)) + + for _, a := range resp.Plugin.Allocations { + require.Equal(t, structs.DefaultNamespace, a.Namespace) + } + + p2 := mock.PluginPolicy("read") + t2 := mock.CreatePolicyAndToken(t, state, 1004, "plugin-read2", p2) + req.AuthToken = t2.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req, resp) + require.NoError(t, err) + require.Equal(t, 0, len(resp.Plugin.Allocations)) +} + +func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { + srv, shutdown := TestServer(t, func(c *Config) {}) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + state := srv.fsm.State() + id0 := uuid.Generate() + id1 := uuid.Generate() + id2 := uuid.Generate() + ns := "notTheNamespace" + + // Create a client node with a plugin + node := mock.Node() + 
node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", Healthy: true, RequiresControllerPlugin: true, + ControllerInfo: &structs.CSIControllerInfo{SupportsAttachDetach: true}, + }, + "adam": {PluginID: "adam", Healthy: true}, + } + err := state.UpsertNode(3, node) + require.NoError(t, err) + + // Create 2 volumes + vols := []*structs.CSIVolume{ + { + ID: id0, + Namespace: ns, + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + ControllerRequired: true, + }, + { + ID: id1, + Namespace: ns, + PluginID: "adam", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + ControllerRequired: false, + }, + } + err = state.CSIVolumeRegister(1002, vols) + require.NoError(t, err) + + // has controller + plugin, vol, err := srv.volAndPluginLookup(ns, id0) + require.NotNil(t, plugin) + require.NotNil(t, vol) + require.NoError(t, err) + + // no controller + plugin, vol, err = srv.volAndPluginLookup(ns, id1) + require.Nil(t, plugin) + require.NotNil(t, vol) + require.NoError(t, err) + + // doesn't exist + plugin, vol, err = srv.volAndPluginLookup(ns, id2) + require.Nil(t, plugin) + require.Nil(t, vol) + require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id2)) +} + +func TestCSI_NodeForControllerPlugin(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) {}) + testutil.WaitForLeader(t, srv.RPC) + defer shutdown() + + plugins := map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + NodeInfo: &structs.CSINodeInfo{}, + RequiresControllerPlugin: true, + }, + } + state := srv.fsm.State() + + node1 := mock.Node() + node1.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node1.CSIControllerPlugins = plugins + node2 := mock.Node() + node2.CSIControllerPlugins = plugins + node2.ID = uuid.Generate() + node3 := mock.Node() + node3.ID = uuid.Generate() + + err := state.UpsertNode(1002, node1) + require.NoError(t, err) + err = state.UpsertNode(1003, node2) + require.NoError(t, err) + err = state.UpsertNode(1004, node3) + require.NoError(t, err) + + ws := memdb.NewWatchSet() + + plugin, err := state.CSIPluginByID(ws, "minnie") + require.NoError(t, err) + nodeID, err := srv.nodeForControllerPlugin(plugin) + + // only node1 has both the controller and a recent Nomad version + require.Equal(t, nodeID, node1.ID) +} diff --git a/nomad/fsm.go b/nomad/fsm.go index c8e7f8f07..2dbaee2f5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -260,6 +260,12 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} { return n.applyUpsertSIAccessor(buf[1:], log.Index) case structs.ServiceIdentityAccessorDeregisterRequestType: return n.applyDeregisterSIAccessor(buf[1:], log.Index) + case structs.CSIVolumeRegisterRequestType: + return n.applyCSIVolumeRegister(buf[1:], log.Index) + case structs.CSIVolumeDeregisterRequestType: + return n.applyCSIVolumeDeregister(buf[1:], log.Index) + case structs.CSIVolumeClaimRequestType: + return n.applyCSIVolumeClaim(buf[1:], log.Index) } // Check enterprise only message types. 
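Every Raft log entry the FSM applies is framed as one message-type byte followed by the msgpack-encoded request, which is why the new cases above pass buf[1:] to their handlers. A self-contained sketch of that dispatch shape (types and constants simplified; not Nomad's actual definitions):

	package main

	import "fmt"

	type messageType uint8

	const (
		csiVolumeRegisterRequestType messageType = iota
		csiVolumeDeregisterRequestType
		csiVolumeClaimRequestType
	)

	// apply mirrors the shape of nomadFSM.Apply: byte 0 selects the handler,
	// the rest of the buffer is the encoded request.
	func apply(buf []byte) string {
		payload := buf[1:]
		switch messageType(buf[0]) {
		case csiVolumeRegisterRequestType:
			return fmt.Sprintf("applyCSIVolumeRegister: %d payload bytes", len(payload))
		case csiVolumeDeregisterRequestType:
			return fmt.Sprintf("applyCSIVolumeDeregister: %d payload bytes", len(payload))
		case csiVolumeClaimRequestType:
			return fmt.Sprintf("applyCSIVolumeClaim: %d payload bytes", len(payload))
		}
		return "unknown message type"
	}

	func main() {
		fmt.Println(apply([]byte{byte(csiVolumeClaimRequestType), 0x1, 0x2}))
	}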
@@ -1114,6 +1120,66 @@ func (n *nomadFSM) applySchedulerConfigUpdate(buf []byte, index uint64) interfac
 	return n.state.SchedulerSetConfig(index, &req.Config)
 }
 
+func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} {
+	var req structs.CSIVolumeRegisterRequest
+	if err := structs.Decode(buf, &req); err != nil {
+		panic(fmt.Errorf("failed to decode request: %v", err))
+	}
+	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now())
+
+	if err := n.state.CSIVolumeRegister(index, req.Volumes); err != nil {
+		n.logger.Error("CSIVolumeRegister failed", "error", err)
+		return err
+	}
+
+	return nil
+}
+
+func (n *nomadFSM) applyCSIVolumeDeregister(buf []byte, index uint64) interface{} {
+	var req structs.CSIVolumeDeregisterRequest
+	if err := structs.Decode(buf, &req); err != nil {
+		panic(fmt.Errorf("failed to decode request: %v", err))
+	}
+	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_deregister"}, time.Now())
+
+	if err := n.state.CSIVolumeDeregister(index, req.RequestNamespace(), req.VolumeIDs); err != nil {
+		n.logger.Error("CSIVolumeDeregister failed", "error", err)
+		return err
+	}
+
+	return nil
+}
+
+func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} {
+	var req structs.CSIVolumeClaimRequest
+	if err := structs.Decode(buf, &req); err != nil {
+		panic(fmt.Errorf("failed to decode request: %v", err))
+	}
+	defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now())
+
+	ws := memdb.NewWatchSet()
+	alloc, err := n.state.AllocByID(ws, req.AllocationID)
+	if err != nil {
+		n.logger.Error("AllocByID failed", "error", err)
+		return err
+	}
+	if alloc == nil {
+		n.logger.Error("AllocByID failed to find alloc", "alloc_id", req.AllocationID)
+		return structs.ErrUnknownAllocationPrefix
+	}
+
+	if err := n.state.CSIVolumeClaim(index, req.RequestNamespace(), req.VolumeID, alloc, req.Claim); err != nil {
+		n.logger.Error("CSIVolumeClaim failed", "error", err)
+		return err
+	}
+
+	return nil
+}
+
 func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) {
 	// Create a new snapshot
 	snap, err := n.state.Snapshot()
diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go
index 76c235a9f..a0b17323a 100644
--- a/nomad/job_endpoint.go
+++ b/nomad/job_endpoint.go
@@ -107,23 +107,28 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
 	// Validate Volume Permissions
 	for _, tg := range args.Job.TaskGroups {
 		for _, vol := range tg.Volumes {
-			if vol.Type != structs.VolumeTypeHost {
+			switch vol.Type {
+			case structs.VolumeTypeCSI:
+				if !allowCSIMount(aclObj, args.RequestNamespace()) {
+					return structs.ErrPermissionDenied
+				}
+			case structs.VolumeTypeHost:
+				// If a volume is readonly, then we allow access if the user has ReadOnly
+				// or ReadWrite access to the volume. Otherwise we only allow access if
+				// they have ReadWrite access.
+				if vol.ReadOnly {
+					if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadOnly) &&
+						!aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) {
+						return structs.ErrPermissionDenied
+					}
+				} else {
+					if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) {
+						return structs.ErrPermissionDenied
+					}
+				}
+			default:
 				return structs.ErrPermissionDenied
 			}
-
-			// If a volume is readonly, then we allow access if the user has ReadOnly
-			// or ReadWrite access to the volume.
Otherwise we only allow access if - // they have ReadWrite access. - if vol.ReadOnly { - if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadOnly) && - !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { - return structs.ErrPermissionDenied - } - } else { - if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { - return structs.ErrPermissionDenied - } - } } for _, t := range tg.Tasks { @@ -134,6 +139,12 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis return structs.ErrPermissionDenied } } + + if t.CSIPluginConfig != nil { + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIRegisterPlugin) { + return structs.ErrPermissionDenied + } + } } } diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 47c78c3a1..5954a2bc4 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -384,6 +384,10 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { Source: "prod-ca-certs", ReadOnly: readonlyVolume, }, + "csi": { + Type: structs.VolumeTypeCSI, + Source: "prod-db", + }, } tg.Tasks[0].VolumeMounts = []*structs.VolumeMount{ @@ -398,17 +402,37 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { return j } + newCSIPluginJob := func() *structs.Job { + j := mock.Job() + t := j.TaskGroups[0].Tasks[0] + t.CSIPluginConfig = &structs.TaskCSIPluginConfig{ + ID: "foo", + Type: "node", + } + return j + } + submitJobPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob, acl.NamespaceCapabilitySubmitJob}) submitJobToken := mock.CreatePolicyAndToken(t, s1.State(), 1001, "test-submit-job", submitJobPolicy) volumesPolicyReadWrite := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadWrite}) - submitJobWithVolumesReadWriteToken := mock.CreatePolicyAndToken(t, s1.State(), 1002, "test-submit-volumes", submitJobPolicy+"\n"+volumesPolicyReadWrite) + volumesPolicyCSIMount := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIMountVolume}) + + mock.PluginPolicy("read") + + submitJobWithVolumesReadWriteToken := mock.CreatePolicyAndToken(t, s1.State(), 1002, "test-submit-volumes", submitJobPolicy+ + volumesPolicyReadWrite+ + volumesPolicyCSIMount) volumesPolicyReadOnly := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadOnly}) - submitJobWithVolumesReadOnlyToken := mock.CreatePolicyAndToken(t, s1.State(), 1003, "test-submit-volumes-readonly", submitJobPolicy+"\n"+volumesPolicyReadOnly) + submitJobWithVolumesReadOnlyToken := mock.CreatePolicyAndToken(t, s1.State(), 1003, "test-submit-volumes-readonly", submitJobPolicy+ + volumesPolicyReadOnly+ + volumesPolicyCSIMount) + + pluginPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIRegisterPlugin}) + pluginToken := mock.CreatePolicyAndToken(t, s1.State(), 1005, "test-csi-register-plugin", submitJobPolicy+pluginPolicy) cases := []struct { Name string @@ -452,6 +476,18 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { Token: submitJobWithVolumesReadOnlyToken.SecretID, ErrExpected: false, }, + { + Name: "with a token that can submit a job, plugin rejected", + Job: newCSIPluginJob(), + Token: submitJobToken.SecretID, + ErrExpected: true, + }, + { + Name: "with a token that also has csi-register-plugin, accepted", + Job: newCSIPluginJob(), + Token: pluginToken.SecretID, + ErrExpected: false, + }, } for _, tt := 
range cases { diff --git a/nomad/mock/acl.go b/nomad/mock/acl.go index 599bed4b5..d41a76016 100644 --- a/nomad/mock/acl.go +++ b/nomad/mock/acl.go @@ -73,6 +73,11 @@ func QuotaPolicy(policy string) string { return fmt.Sprintf("quota {\n\tpolicy = %q\n}\n", policy) } +// PluginPolicy is a helper for generating the hcl for a given plugin policy. +func PluginPolicy(policy string) string { + return fmt.Sprintf("plugin {\n\tpolicy = %q\n}\n", policy) +} + // CreatePolicy creates a policy with the given name and rule. func CreatePolicy(t testing.T, state StateStore, index uint64, name, rule string) { t.Helper() diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index e26891349..d8348a79e 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -1081,40 +1081,80 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene now := time.Now() var evals []*structs.Evaluation - for _, alloc := range args.Alloc { - alloc.ModifyTime = now.UTC().UnixNano() + // A set of de-duplicated IDs for jobs that need volume claim GC. + // Later we'll create a gc eval for each job. + jobsWithVolumeGCs := make(map[string]*structs.Job) - // Add an evaluation if this is a failed alloc that is eligible for rescheduling - if alloc.ClientStatus == structs.AllocClientStatusFailed { - // Only create evaluations if this is an existing alloc, - // and eligible as per its task group's ReschedulePolicy - if existingAlloc, _ := n.srv.State().AllocByID(nil, alloc.ID); existingAlloc != nil { - job, err := n.srv.State().JobByID(nil, existingAlloc.Namespace, existingAlloc.JobID) - if err != nil { - n.logger.Error("UpdateAlloc unable to find job", "job", existingAlloc.JobID, "error", err) - continue - } - if job == nil { - n.logger.Debug("UpdateAlloc unable to find job", "job", existingAlloc.JobID) - continue - } - taskGroup := job.LookupTaskGroup(existingAlloc.TaskGroup) - if taskGroup != nil && existingAlloc.FollowupEvalID == "" && existingAlloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) { - eval := &structs.Evaluation{ - ID: uuid.Generate(), - Namespace: existingAlloc.Namespace, - TriggeredBy: structs.EvalTriggerRetryFailedAlloc, - JobID: existingAlloc.JobID, - Type: job.Type, - Priority: job.Priority, - Status: structs.EvalStatusPending, - CreateTime: now.UTC().UnixNano(), - ModifyTime: now.UTC().UnixNano(), - } - evals = append(evals, eval) - } + for _, allocToUpdate := range args.Alloc { + allocToUpdate.ModifyTime = now.UTC().UnixNano() + + if !allocToUpdate.TerminalStatus() { + continue + } + + alloc, _ := n.srv.State().AllocByID(nil, allocToUpdate.ID) + if alloc == nil { + continue + } + + job, err := n.srv.State().JobByID(nil, alloc.Namespace, alloc.JobID) + if err != nil { + n.logger.Error("UpdateAlloc unable to find job", "job", alloc.JobID, "error", err) + continue + } + if job == nil { + n.logger.Debug("UpdateAlloc unable to find job", "job", alloc.JobID) + continue + } + + taskGroup := job.LookupTaskGroup(alloc.TaskGroup) + if taskGroup == nil { + continue + } + + // If the terminal alloc has CSI volumes, add its job to the list + // of jobs we're going to call volume claim GC on. 
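// ---------------------------------------------------------------------------
// Aside: a runnable, self-contained sketch of the eval JobID convention used
// by the GC evaluations built just below. Only the "<core job>:<job ID>" shape
// is taken from this change; the constant's value and the SplitN parsing are
// illustrative assumptions, not the real nomad/structs API.
package main

import (
	"fmt"
	"strings"
)

// coreJobCSIVolumeClaimGC stands in for structs.CoreJobCSIVolumeClaimGC; the
// actual constant value is assumed here for illustration.
const coreJobCSIVolumeClaimGC = "csi-volume-claim-gc"

func main() {
	jobID := "my-service"
	evalJobID := coreJobCSIVolumeClaimGC + ":" + jobID

	// SplitN with n=2 keeps any ":" inside the job ID itself intact.
	parts := strings.SplitN(evalJobID, ":", 2)
	fmt.Println(parts[0]) // csi-volume-claim-gc
	fmt.Println(parts[1]) // my-service
}
// ---------------------------------------------------------------------------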
+		for _, vol := range taskGroup.Volumes {
+			if vol.Type == structs.VolumeTypeCSI {
+				jobsWithVolumeGCs[job.ID] = job
+			}
+		}
+
+		// Add an evaluation if this is a failed alloc that is eligible for rescheduling
+		if allocToUpdate.ClientStatus == structs.AllocClientStatusFailed && alloc.FollowupEvalID == "" && alloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) {
+			eval := &structs.Evaluation{
+				ID:          uuid.Generate(),
+				Namespace:   alloc.Namespace,
+				TriggeredBy: structs.EvalTriggerRetryFailedAlloc,
+				JobID:       alloc.JobID,
+				Type:        job.Type,
+				Priority:    job.Priority,
+				Status:      structs.EvalStatusPending,
+				CreateTime:  now.UTC().UnixNano(),
+				ModifyTime:  now.UTC().UnixNano(),
+			}
+			evals = append(evals, eval)
+		}
+	}
+
+	// Add an evaluation for garbage collecting the CSI volume claims
+	// of jobs with terminal allocs
+	for _, job := range jobsWithVolumeGCs {
+		// we have to build this eval by hand rather than calling srv.CoreJob
+		// here because we need to use the alloc's namespace
+		eval := &structs.Evaluation{
+			ID:          uuid.Generate(),
+			Namespace:   job.Namespace,
+			Priority:    structs.CoreJobPriority,
+			Type:        structs.JobTypeCore,
+			TriggeredBy: structs.EvalTriggerAllocStop,
+			JobID:       structs.CoreJobCSIVolumeClaimGC + ":" + job.ID,
+			LeaderACL:   n.srv.getLeaderAcl(),
+			Status:      structs.EvalStatusPending,
+			CreateTime:  now.UTC().UnixNano(),
+			ModifyTime:  now.UTC().UnixNano(),
+		}
+		evals = append(evals, eval)
 	}
 
 	// Add this to the batch
diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go
index 9d7ebe5ec..cd20482f4 100644
--- a/nomad/node_endpoint_test.go
+++ b/nomad/node_endpoint_test.go
@@ -2141,7 +2141,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) {
 	start := time.Now()
 	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2)
 	require.Nil(err)
-	require.NotEqual(0, resp2.Index)
+	require.NotEqual(uint64(0), resp2.Index)
 
 	if diff := time.Since(start); diff < batchUpdateInterval {
 		t.Fatalf("too fast: %v", diff)
@@ -2312,6 +2312,105 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
 	}
 }
 
+func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) {
+	t.Parallel()
+	srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 })
+	defer shutdown()
+	testutil.WaitForLeader(t, srv.RPC)
+
+	codec := rpcClient(t, srv)
+	state := srv.fsm.State()
+	ws := memdb.NewWatchSet()
+
+	// Create a client node, plugin, and volume
+	node := mock.Node()
+	node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on earlier versions
+	node.CSINodePlugins = map[string]*structs.CSIInfo{
+		"csi-plugin-example": {
+			PluginID:       "csi-plugin-example",
+			Healthy:        true,
+			NodeInfo:       &structs.CSINodeInfo{},
+			ControllerInfo: &structs.CSIControllerInfo{},
+		},
+	}
+	err := state.UpsertNode(99, node)
+	require.NoError(t, err)
+	volId0 := uuid.Generate()
+	ns := "notTheNamespace"
+	vols := []*structs.CSIVolume{{
+		ID:             volId0,
+		Namespace:      ns,
+		PluginID:       "csi-plugin-example",
+		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
+		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+	}}
+	err = state.CSIVolumeRegister(100, vols)
+	require.NoError(t, err)
+	vol, err := state.CSIVolumeByID(ws, ns, volId0)
+	require.NoError(t, err)
+	require.Len(t, vol.ReadAllocs, 0)
+	require.Len(t, vol.WriteAllocs, 0)
+
+	// Create a job with 2 allocations
+	job := mock.Job()
+	job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
+		"_": {
+			Name:     "someVolume",
+			Type:     structs.VolumeTypeCSI,
+			Source:   volId0,
+			ReadOnly: false,
+		},
+	}
+	err =
state.UpsertJob(101, job) + require.NoError(t, err) + + alloc1 := mock.Alloc() + alloc1.JobID = job.ID + alloc1.NodeID = node.ID + err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID)) + require.NoError(t, err) + alloc1.TaskGroup = job.TaskGroups[0].Name + + alloc2 := mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, ns, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update the 1st alloc as terminal/failed + alloc1.ClientStatus = structs.AllocClientStatusFailed + err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", + &structs.AllocUpdateRequest{ + Alloc: []*structs.Allocation{alloc1}, + WriteRequest: structs.WriteRequest{Region: "global"}, + }, &structs.NodeAllocsResponse{}) + require.NoError(t, err) + + // Lookup the alloc and verify status was updated + out, err := state.AllocByID(ws, alloc1.ID) + require.NoError(t, err) + require.Equal(t, structs.AllocClientStatusFailed, out.ClientStatus) + + // Verify the eval for the claim GC was emitted + // Lookup the evaluations + eval, err := state.EvalsByJob(ws, job.Namespace, structs.CoreJobCSIVolumeClaimGC+":"+job.ID) + require.NotNil(t, eval) + require.Nil(t, err) +} + func TestClientEndpoint_CreateNodeEvals(t *testing.T) { t.Parallel() @@ -3235,7 +3334,7 @@ func TestClientEndpoint_EmitEvents(t *testing.T) { var resp structs.GenericResponse err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp) require.Nil(err) - require.NotEqual(0, resp.Index) + require.NotEqual(uint64(0), resp.Index) // Check for the node in the FSM ws := memdb.NewWatchSet() diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index 0f77b645f..45b70b880 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -116,7 +116,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { req.AuthToken = token.SecretID var resp structs.PeriodicForceResponse assert.Nil(msgpackrpc.CallWithCodec(codec, "Periodic.Force", req, &resp)) - assert.NotEqual(0, resp.Index) + assert.NotEqual(uint64(0), resp.Index) // Lookup the evaluation ws := memdb.NewWatchSet() @@ -132,7 +132,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { req.AuthToken = root.SecretID var resp structs.PeriodicForceResponse assert.Nil(msgpackrpc.CallWithCodec(codec, "Periodic.Force", req, &resp)) - assert.NotEqual(0, resp.Index) + assert.NotEqual(uint64(0), resp.Index) // Lookup the evaluation ws := memdb.NewWatchSet() diff --git a/nomad/rpc.go b/nomad/rpc.go index 29461eb40..e5ab6f2a7 100644 --- a/nomad/rpc.go +++ b/nomad/rpc.go @@ -511,7 +511,7 @@ func (r *rpcHandler) forward(method string, info structs.RPCInfo, args interface region := info.RequestRegion() if region == "" { - return true, fmt.Errorf("missing target RPC") + return true, fmt.Errorf("missing region for target RPC") } // Handle region forwarding diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 4e1b3acfa..3e5eea504 100644 --- 
a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -29,6 +29,8 @@ var ( structs.Nodes, structs.Evals, structs.Deployments, + structs.Plugins, + structs.Volumes, } ) @@ -52,15 +54,19 @@ func (s *Search) getMatches(iter memdb.ResultIterator, prefix string) ([]string, var id string switch t := raw.(type) { case *structs.Job: - id = raw.(*structs.Job).ID + id = t.ID case *structs.Evaluation: - id = raw.(*structs.Evaluation).ID + id = t.ID case *structs.Allocation: - id = raw.(*structs.Allocation).ID + id = t.ID case *structs.Node: - id = raw.(*structs.Node).ID + id = t.ID case *structs.Deployment: - id = raw.(*structs.Deployment).ID + id = t.ID + case *structs.CSIPlugin: + id = t.ID + case *structs.CSIVolume: + id = t.ID default: matchID, ok := getEnterpriseMatch(raw) if !ok { @@ -95,6 +101,10 @@ func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix return state.NodesByIDPrefix(ws, prefix) case structs.Deployments: return state.DeploymentsByIDPrefix(ws, namespace, prefix) + case structs.Plugins: + return state.CSIPluginsByIDPrefix(ws, prefix) + case structs.Volumes: + return state.CSIVolumesByIDPrefix(ws, namespace, prefix) default: return getEnterpriseResourceIter(context, aclObj, namespace, prefix, ws, state) } diff --git a/nomad/search_endpoint_oss.go b/nomad/search_endpoint_oss.go index aaecf1b34..b4d80c634 100644 --- a/nomad/search_endpoint_oss.go +++ b/nomad/search_endpoint_oss.go @@ -44,7 +44,13 @@ func anySearchPerms(aclObj *acl.ACL, namespace string, context structs.Context) nodeRead := aclObj.AllowNodeRead() jobRead := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob) - if !nodeRead && !jobRead { + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume, + acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityListJobs, + acl.NamespaceCapabilityReadJob) + volRead := allowVolume(aclObj, namespace) + + if !nodeRead && !jobRead && !volRead { return false } @@ -60,6 +66,9 @@ func anySearchPerms(aclObj *acl.ACL, namespace string, context structs.Context) return false } } + if !volRead && context == structs.Volumes { + return false + } return true } @@ -83,6 +92,11 @@ func searchContexts(aclObj *acl.ACL, namespace string, context structs.Context) } jobRead := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume, + acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityListJobs, + acl.NamespaceCapabilityReadJob) + volRead := allowVolume(aclObj, namespace) // Filter contexts down to those the ACL grants access to available := make([]structs.Context, 0, len(all)) @@ -96,6 +110,10 @@ func searchContexts(aclObj *acl.ACL, namespace string, context structs.Context) if aclObj.AllowNodeRead() { available = append(available, c) } + case structs.Volumes: + if volRead { + available = append(available, c) + } } } return available diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index 31c3ae72c..3ee3d3afb 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -7,10 +7,12 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const jobIndex = 1000 @@ -746,3 +748,77 @@ func 
TestSearch_PrefixSearch_MultiRegion(t *testing.T) { assert.Equal(job.ID, resp.Matches[structs.Jobs][0]) assert.Equal(uint64(jobIndex), resp.Index) } + +func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + s, cleanupS := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 + }) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + id := uuid.Generate() + CreateTestCSIPlugin(s.fsm.State(), id) + + prefix := id[:len(id)-2] + + req := &structs.SearchRequest{ + Prefix: prefix, + Context: structs.Plugins, + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + } + + var resp structs.SearchResponse + if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + assert.Equal(1, len(resp.Matches[structs.Plugins])) + assert.Equal(id, resp.Matches[structs.Plugins][0]) + assert.Equal(resp.Truncations[structs.Plugins], false) +} + +func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + s, cleanupS := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 + }) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + id := uuid.Generate() + err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{ + ID: id, + Namespace: structs.DefaultNamespace, + PluginID: "glade", + }}) + require.NoError(t, err) + + prefix := id[:len(id)-2] + + req := &structs.SearchRequest{ + Prefix: prefix, + Context: structs.Volumes, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: structs.DefaultNamespace, + }, + } + + var resp structs.SearchResponse + if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + assert.Equal(1, len(resp.Matches[structs.Volumes])) + assert.Equal(id, resp.Matches[structs.Volumes][0]) + assert.Equal(resp.Truncations[structs.Volumes], false) +} diff --git a/nomad/server.go b/nomad/server.go index 0fb95e05d..921d9901b 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -249,6 +249,8 @@ type endpoints struct { Eval *Eval Plan *Plan Alloc *Alloc + CSIVolume *CSIVolume + CSIPlugin *CSIPlugin Deployment *Deployment Region *Region Search *Search @@ -259,10 +261,11 @@ type endpoints struct { Enterprise *EnterpriseEndpoints // Client endpoints - ClientStats *ClientStats - FileSystem *FileSystem - Agent *Agent - ClientAllocations *ClientAllocations + ClientStats *ClientStats + FileSystem *FileSystem + Agent *Agent + ClientAllocations *ClientAllocations + ClientCSIController *ClientCSIController } // NewServer is used to construct a new Nomad server from the @@ -1092,6 +1095,8 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.Eval = &Eval{srv: s, logger: s.logger.Named("eval")} s.staticEndpoints.Job = NewJobEndpoints(s) s.staticEndpoints.Node = &Node{srv: s, logger: s.logger.Named("client")} // Add but don't register + s.staticEndpoints.CSIVolume = &CSIVolume{srv: s, logger: s.logger.Named("csi_volume")} + s.staticEndpoints.CSIPlugin = &CSIPlugin{srv: s, logger: s.logger.Named("csi_plugin")} s.staticEndpoints.Deployment = &Deployment{srv: s, logger: s.logger.Named("deployment")} s.staticEndpoints.Operator = &Operator{srv: s, logger: s.logger.Named("operator")} s.staticEndpoints.Periodic = &Periodic{srv: s, logger: s.logger.Named("periodic")} @@ -1106,6 +1111,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { 
s.staticEndpoints.ClientStats = &ClientStats{srv: s, logger: s.logger.Named("client_stats")} s.staticEndpoints.ClientAllocations = &ClientAllocations{srv: s, logger: s.logger.Named("client_allocs")} s.staticEndpoints.ClientAllocations.register() + s.staticEndpoints.ClientCSIController = &ClientCSIController{srv: s, logger: s.logger.Named("client_csi")} // Streaming endpoints s.staticEndpoints.FileSystem = &FileSystem{srv: s, logger: s.logger.Named("client_fs")} @@ -1120,6 +1126,8 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { server.Register(s.staticEndpoints.Alloc) server.Register(s.staticEndpoints.Eval) server.Register(s.staticEndpoints.Job) + server.Register(s.staticEndpoints.CSIVolume) + server.Register(s.staticEndpoints.CSIPlugin) server.Register(s.staticEndpoints.Deployment) server.Register(s.staticEndpoints.Operator) server.Register(s.staticEndpoints.Periodic) @@ -1131,6 +1139,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.Enterprise.Register(server) server.Register(s.staticEndpoints.ClientStats) server.Register(s.staticEndpoints.ClientAllocations) + server.Register(s.staticEndpoints.ClientCSIController) server.Register(s.staticEndpoints.FileSystem) server.Register(s.staticEndpoints.Agent) diff --git a/nomad/state/iterator.go b/nomad/state/iterator.go new file mode 100644 index 000000000..2c0efb9ad --- /dev/null +++ b/nomad/state/iterator.go @@ -0,0 +1,30 @@ +package state + +type SliceIterator struct { + data []interface{} + idx int +} + +func NewSliceIterator() *SliceIterator { + return &SliceIterator{ + data: []interface{}{}, + idx: 0, + } +} + +func (i *SliceIterator) Add(datum interface{}) { + i.data = append(i.data, datum) +} + +func (i *SliceIterator) Next() interface{} { + if i.idx == len(i.data) { + return nil + } + idx := i.idx + i.idx += 1 + return i.data[idx] +} + +func (i *SliceIterator) WatchCh() <-chan struct{} { + return nil +} diff --git a/nomad/state/schema.go b/nomad/state/schema.go index a5ec55e71..b58bf6855 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -47,6 +47,8 @@ func init() { autopilotConfigTableSchema, schedulerConfigTableSchema, clusterMetaTableSchema, + csiVolumeTableSchema, + csiPluginTableSchema, }...) 
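// ---------------------------------------------------------------------------
// Aside: a minimal, self-contained sketch of how the new SliceIterator above
// behaves as a memdb.ResultIterator stand-in: Next returns each added item in
// insertion order and then nil, and WatchCh returns nil because a static
// slice can never change. The type is re-declared locally so the sketch runs
// on its own; the volume IDs are placeholder data.
package main

import "fmt"

type SliceIterator struct {
	data []interface{}
	idx  int
}

func (i *SliceIterator) Add(datum interface{}) { i.data = append(i.data, datum) }

func (i *SliceIterator) Next() interface{} {
	if i.idx == len(i.data) {
		return nil
	}
	i.idx++
	return i.data[i.idx-1]
}

func (i *SliceIterator) WatchCh() <-chan struct{} { return nil }

func main() {
	iter := &SliceIterator{}
	iter.Add("vol-a")
	iter.Add("vol-b")
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		fmt.Println(raw) // vol-a, then vol-b
	}
}
// ---------------------------------------------------------------------------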
}
@@ -677,3 +679,52 @@ func clusterMetaTableSchema() *memdb.TableSchema {
 		},
 	}
 }
+
+// csiVolumeTableSchema returns the table schema for CSI volumes, which are
+// identified by namespace and ID, and searchable by plugin ID
+func csiVolumeTableSchema() *memdb.TableSchema {
+	return &memdb.TableSchema{
+		Name: "csi_volumes",
+		Indexes: map[string]*memdb.IndexSchema{
+			"id": {
+				Name:         "id",
+				AllowMissing: false,
+				Unique:       true,
+				Indexer: &memdb.CompoundIndex{
+					Indexes: []memdb.Indexer{
+						&memdb.StringFieldIndex{
+							Field: "Namespace",
+						},
+						&memdb.StringFieldIndex{
+							Field: "ID",
+						},
+					},
+				},
+			},
+			"plugin_id": {
+				Name:         "plugin_id",
+				AllowMissing: false,
+				Unique:       false,
+				Indexer: &memdb.StringFieldIndex{
+					Field: "PluginID",
+				},
+			},
+		},
+	}
+}
+
+// csiPluginTableSchema returns the table schema for CSI plugins, which are
+// identified by ID globally
+func csiPluginTableSchema() *memdb.TableSchema {
+	return &memdb.TableSchema{
+		Name: "csi_plugins",
+		Indexes: map[string]*memdb.IndexSchema{
+			"id": {
+				Name:         "id",
+				AllowMissing: false,
+				Unique:       true,
+				Indexer: &memdb.StringFieldIndex{
+					Field: "ID",
+				},
+			},
+		},
+	}
+}
diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go
index df3fe4d30..ad2493d39 100644
--- a/nomad/state/state_store.go
+++ b/nomad/state/state_store.go
@@ -677,6 +677,9 @@ func (s *StateStore) UpsertNode(index uint64, node *structs.Node) error {
 	if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
 		return fmt.Errorf("index update failed: %v", err)
 	}
+	if err := upsertNodeCSIPlugins(txn, node, index); err != nil {
+		return fmt.Errorf("csi plugin update failed: %v", err)
+	}
 
 	txn.Commit()
 	return nil
@@ -704,6 +707,11 @@ func (s *StateStore) DeleteNode(index uint64, nodes []string) error {
 		if err := txn.Delete("nodes", existing); err != nil {
 			return fmt.Errorf("node delete failed: %s: %v", nodeID, err)
 		}
+
+		node := existing.(*structs.Node)
+		if err := deleteNodeCSIPlugins(txn, node, index); err != nil {
+			return fmt.Errorf("csi plugin delete failed: %v", err)
+		}
 	}
 
 	if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil {
@@ -931,6 +939,107 @@ func appendNodeEvents(index uint64, node *structs.Node, events []*structs.NodeEv
 	}
 }
 
+// upsertNodeCSIPlugins indexes csi plugins for volume retrieval, with health.
It's called +// on upsertNodeEvents, so that event driven health changes are updated +func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) error { + if len(node.CSIControllerPlugins) == 0 && len(node.CSINodePlugins) == 0 { + return nil + } + + loop := func(info *structs.CSIInfo) error { + raw, err := txn.First("csi_plugins", "id", info.PluginID) + if err != nil { + return fmt.Errorf("csi_plugin lookup error: %s %v", info.PluginID, err) + } + + var plug *structs.CSIPlugin + if raw != nil { + plug = raw.(*structs.CSIPlugin).Copy() + } else { + plug = structs.NewCSIPlugin(info.PluginID, index) + plug.Provider = info.Provider + plug.Version = info.ProviderVersion + } + + plug.AddPlugin(node.ID, info) + plug.ModifyIndex = index + + err = txn.Insert("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins insert error: %v", err) + } + + return nil + } + + for _, info := range node.CSIControllerPlugins { + err := loop(info) + if err != nil { + return err + } + } + + for _, info := range node.CSINodePlugins { + err := loop(info) + if err != nil { + return err + } + } + + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + + return nil +} + +// deleteNodeCSIPlugins cleans up CSIInfo node health status, called in DeleteNode +func deleteNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) error { + if len(node.CSIControllerPlugins) == 0 && len(node.CSINodePlugins) == 0 { + return nil + } + + names := map[string]struct{}{} + for _, info := range node.CSIControllerPlugins { + names[info.PluginID] = struct{}{} + } + for _, info := range node.CSINodePlugins { + names[info.PluginID] = struct{}{} + } + + for id := range names { + raw, err := txn.First("csi_plugins", "id", id) + if err != nil { + return fmt.Errorf("csi_plugins lookup error %s: %v", id, err) + } + if raw == nil { + return fmt.Errorf("csi_plugins missing plugin %s", id) + } + + plug := raw.(*structs.CSIPlugin).Copy() + plug.DeleteNode(node.ID) + plug.ModifyIndex = index + + if plug.IsEmpty() { + err = txn.Delete("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins delete error: %v", err) + } + } else { + err = txn.Insert("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins update error %s: %v", id, err) + } + } + } + + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + + return nil +} + // NodeByID is used to lookup a node by ID func (s *StateStore) NodeByID(ws memdb.WatchSet, nodeID string) (*structs.Node, error) { txn := s.db.Txn(false) @@ -1511,6 +1620,363 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) return iter, nil } +// CSIVolumeRegister adds a volume to the server store, failing if it already exists +func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolume) error { + txn := s.db.Txn(true) + defer txn.Abort() + + for _, v := range volumes { + // Check for volume existence + obj, err := txn.First("csi_volumes", "id", v.Namespace, v.ID) + if err != nil { + return fmt.Errorf("volume existence check error: %v", err) + } + if obj != nil { + return fmt.Errorf("volume exists: %s", v.ID) + } + + if v.CreateIndex == 0 { + v.CreateIndex = index + v.ModifyIndex = index + } + + err = txn.Insert("csi_volumes", v) + if err != nil { + return fmt.Errorf("volume insert: %v", err) + } + } + + if err := txn.Insert("index", 
&IndexEntry{"csi_volumes", index}); err != nil {
+		return fmt.Errorf("index update failed: %v", err)
+	}
+
+	txn.Commit()
+	return nil
+}
+
+// CSIVolumeByID is used to lookup a single volume. Returns a copy of the volume
+// because its plugins are denormalized to provide accurate Health.
+func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*structs.CSIVolume, error) {
+	txn := s.db.Txn(false)
+
+	watchCh, obj, err := txn.FirstWatch("csi_volumes", "id_prefix", namespace, id)
+	if err != nil {
+		return nil, fmt.Errorf("volume lookup failed: %s %v", id, err)
+	}
+	ws.Add(watchCh)
+
+	if obj == nil {
+		return nil, nil
+	}
+
+	vol := obj.(*structs.CSIVolume)
+	return s.CSIVolumeDenormalizePlugins(ws, vol.Copy())
+}
+
+// CSIVolumesByPluginID looks up csi_volumes by pluginID
+func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, namespace, pluginID string) (memdb.ResultIterator, error) {
+	txn := s.db.Txn(false)
+
+	iter, err := txn.Get("csi_volumes", "plugin_id", pluginID)
+	if err != nil {
+		return nil, fmt.Errorf("volume lookup failed: %v", err)
+	}
+
+	// Filter the iterator by namespace; the filter function returns true for
+	// volumes that should be dropped from the results
+	f := func(raw interface{}) bool {
+		v, ok := raw.(*structs.CSIVolume)
+		if !ok {
+			return false
+		}
+		return v.Namespace != namespace
+	}
+
+	wrap := memdb.NewFilterIterator(iter, f)
+	return wrap, nil
+}
+
+// CSIVolumesByIDPrefix supports search
+func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID string) (memdb.ResultIterator, error) {
+	txn := s.db.Txn(false)
+
+	iter, err := txn.Get("csi_volumes", "id_prefix", namespace, volumeID)
+	if err != nil {
+		return nil, err
+	}
+
+	ws.Add(iter.WatchCh())
+	return iter, nil
+}
+
+// CSIVolumesByNodeID looks up CSIVolumes in use on a node
+func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, namespace, nodeID string) (memdb.ResultIterator, error) {
+	allocs, err := s.AllocsByNode(ws, nodeID)
+	if err != nil {
+		return nil, fmt.Errorf("alloc lookup failed: %v", err)
+	}
+	snap, err := s.Snapshot()
+	if err != nil {
+		return nil, fmt.Errorf("alloc lookup failed: %v", err)
+	}
+
+	allocs, err = snap.DenormalizeAllocationSlice(allocs)
+	if err != nil {
+		return nil, fmt.Errorf("alloc lookup failed: %v", err)
+	}
+
+	// Find volume ids for CSI volumes in running allocs, or allocs that we desire to run
+	ids := map[string]struct{}{}
+	for _, a := range allocs {
+		tg := a.Job.LookupTaskGroup(a.TaskGroup)
+
+		if !(a.DesiredStatus == structs.AllocDesiredStatusRun ||
+			a.ClientStatus == structs.AllocClientStatusRunning) ||
+			len(tg.Volumes) == 0 {
+			continue
+		}
+
+		for _, v := range tg.Volumes {
+			if v.Type != structs.VolumeTypeCSI {
+				continue
+			}
+			ids[v.Source] = struct{}{}
+		}
+	}
+
+	// Lookup the raw CSIVolumes to match the other list interfaces
+	iter := NewSliceIterator()
+	txn := s.db.Txn(false)
+	for id := range ids {
+		raw, err := txn.First("csi_volumes", "id", namespace, id)
+		if err != nil {
+			return nil, fmt.Errorf("volume lookup failed: %s %v", id, err)
+		}
+		iter.Add(raw)
+	}
+
+	return iter, nil
+}
+
+// CSIVolumesByNamespace looks up the entire csi_volumes table
+func (s *StateStore) CSIVolumesByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
+	txn := s.db.Txn(false)
+
+	iter, err := txn.Get("csi_volumes", "id_prefix", namespace, "")
+	if err != nil {
+		return nil, fmt.Errorf("volume lookup failed: %v", err)
+	}
+	ws.Add(iter.WatchCh())
+
+	return iter, nil
+}
+
+// CSIVolumeClaim updates the volume's claim count and allocation list
+func (s *StateStore) 
CSIVolumeClaim(index uint64, namespace, id string, alloc *structs.Allocation, claim structs.CSIVolumeClaimMode) error { + txn := s.db.Txn(true) + defer txn.Abort() + + row, err := txn.First("csi_volumes", "id", namespace, id) + if err != nil { + return fmt.Errorf("volume lookup failed: %s: %v", id, err) + } + if row == nil { + return fmt.Errorf("volume not found: %s", id) + } + + orig, ok := row.(*structs.CSIVolume) + if !ok { + return fmt.Errorf("volume row conversion error") + } + + ws := memdb.NewWatchSet() + volume, err := s.CSIVolumeDenormalizePlugins(ws, orig.Copy()) + if err != nil { + return err + } + if !volume.Claim(claim, alloc) { + return fmt.Errorf("volume max claim reached") + } + + volume.ModifyIndex = index + + if err = txn.Insert("csi_volumes", volume); err != nil { + return fmt.Errorf("volume update failed: %s: %v", id, err) + } + + if err = txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + + txn.Commit() + return nil +} + +// CSIVolumeDeregister removes the volume from the server +func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []string) error { + txn := s.db.Txn(true) + defer txn.Abort() + + for _, id := range ids { + existing, err := txn.First("csi_volumes", "id_prefix", namespace, id) + if err != nil { + return fmt.Errorf("volume lookup failed: %s: %v", id, err) + } + + if existing == nil { + return fmt.Errorf("volume not found: %s", id) + } + + if err = txn.Delete("csi_volumes", existing); err != nil { + return fmt.Errorf("volume delete failed: %s: %v", id, err) + } + } + + if err := txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + + txn.Commit() + return nil +} + +// CSIVolumeDenormalizePlugins returns a CSIVolume with current health and plugins, but +// without allocations +// Use this for current volume metadata, handling lists of volumes +// Use CSIVolumeDenormalize for volumes containing both health and current allocations +func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { + if vol == nil { + return nil, nil + } + + // Lookup CSIPlugin, the health records, and calculate volume health + txn := s.db.Txn(false) + defer txn.Abort() + + plug, err := s.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return nil, fmt.Errorf("plugin lookup error: %s %v", vol.PluginID, err) + } + if plug == nil { + vol.ControllersHealthy = 0 + vol.NodesHealthy = 0 + vol.Schedulable = false + return vol, nil + } + + vol.Provider = plug.Provider + vol.ProviderVersion = plug.Version + vol.ControllerRequired = plug.ControllerRequired + vol.ControllersHealthy = plug.ControllersHealthy + vol.NodesHealthy = plug.NodesHealthy + // This number is incorrect! 
The expected number of node plugins is actually this +
+	// the number of blocked evaluations for the jobs controlling these plugins
+	vol.ControllersExpected = len(plug.Controllers)
+	vol.NodesExpected = len(plug.Nodes)
+
+	vol.Schedulable = vol.NodesHealthy > 0
+	if vol.ControllerRequired {
+		vol.Schedulable = vol.ControllersHealthy > 0 && vol.Schedulable
+	}
+
+	return vol, nil
+}
+
+// CSIVolumeDenormalize returns a CSIVolume with allocations
+func (s *StateStore) CSIVolumeDenormalize(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) {
+	for id := range vol.ReadAllocs {
+		a, err := s.AllocByID(ws, id)
+		if err != nil {
+			return nil, err
+		}
+		vol.ReadAllocs[id] = a
+	}
+
+	for id := range vol.WriteAllocs {
+		a, err := s.AllocByID(ws, id)
+		if err != nil {
+			return nil, err
+		}
+		vol.WriteAllocs[id] = a
+	}
+
+	return vol, nil
+}
+
+// CSIPlugins returns the unfiltered list of all plugin health status
+func (s *StateStore) CSIPlugins(ws memdb.WatchSet) (memdb.ResultIterator, error) {
+	txn := s.db.Txn(false)
+	defer txn.Abort()
+
+	iter, err := txn.Get("csi_plugins", "id")
+	if err != nil {
+		return nil, fmt.Errorf("csi_plugins lookup failed: %v", err)
+	}
+
+	return iter, nil
+}
+
+// CSIPluginsByIDPrefix supports search
+func (s *StateStore) CSIPluginsByIDPrefix(ws memdb.WatchSet, pluginID string) (memdb.ResultIterator, error) {
+	txn := s.db.Txn(false)
+
+	iter, err := txn.Get("csi_plugins", "id_prefix", pluginID)
+	if err != nil {
+		return nil, err
+	}
+
+	ws.Add(iter.WatchCh())
+
+	return iter, nil
+}
+
+// CSIPluginByID returns the one named CSIPlugin
+func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPlugin, error) {
+	txn := s.db.Txn(false)
+	defer txn.Abort()
+
+	raw, err := txn.First("csi_plugins", "id_prefix", id)
+	if err != nil {
+		return nil, fmt.Errorf("csi_plugin lookup failed: %s %v", id, err)
+	}
+
+	if raw == nil {
+		return nil, nil
+	}
+
+	plug := raw.(*structs.CSIPlugin)
+
+	return plug, nil
+}
+
+// CSIPluginDenormalize returns a CSIPlugin with allocation details
+func (s *StateStore) CSIPluginDenormalize(ws memdb.WatchSet, plug *structs.CSIPlugin) (*structs.CSIPlugin, error) {
+	if plug == nil {
+		return nil, nil
+	}
+
+	// Get the unique list of allocation ids
+	ids := map[string]struct{}{}
+	for _, info := range plug.Controllers {
+		ids[info.AllocID] = struct{}{}
+	}
+	for _, info := range plug.Nodes {
+		ids[info.AllocID] = struct{}{}
+	}
+
+	for id := range ids {
+		alloc, err := s.AllocByID(ws, id)
+		if err != nil {
+			return nil, err
+		}
+		if alloc == nil {
+			continue
+		}
+		plug.Allocations = append(plug.Allocations, alloc.Stub())
+	}
+
+	return plug, nil
+}
+
+// UpsertPeriodicLaunch is used to register a launch or update it.
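// ---------------------------------------------------------------------------
// Aside: the schedulability rule applied in CSIVolumeDenormalizePlugins above,
// restated as a self-contained, runnable truth table. The parameter names
// mirror the CSIVolume fields; everything else is illustrative.
package main

import "fmt"

func schedulable(nodesHealthy, controllersHealthy int, controllerRequired bool) bool {
	// At least one healthy node plugin is always required.
	ok := nodesHealthy > 0
	// A healthy controller is additionally required when the plugin needs one.
	if controllerRequired {
		ok = ok && controllersHealthy > 0
	}
	return ok
}

func main() {
	fmt.Println(schedulable(2, 0, false)) // true: node-only plugin
	fmt.Println(schedulable(2, 0, true))  // false: controller required, none healthy
	fmt.Println(schedulable(0, 1, true))  // false: no healthy node plugin
}
// ---------------------------------------------------------------------------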
func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.PeriodicLaunch) error {
 	txn := s.db.Txn(true)
@@ -2325,8 +2791,8 @@ func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, termin
 	return out, nil
 }
 
-// AllocsByJob returns all the allocations by job id
-func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Allocation, error) {
+// AllocsByJob returns allocations by job id
+func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, anyCreateIndex bool) ([]*structs.Allocation, error) {
 	txn := s.db.Txn(false)
 
 	// Get the job
@@ -2358,7 +2824,7 @@ func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all
 		// If the allocation belongs to a job with the same ID but a different
 		// create index and we are not getting all the allocations whose Jobs
 		// matches the same Job ID then we skip it
-		if !all && job != nil && alloc.Job.CreateIndex != job.CreateIndex {
+		if !anyCreateIndex && job != nil && alloc.Job.CreateIndex != job.CreateIndex {
 			continue
 		}
 		out = append(out, raw.(*structs.Allocation))
diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go
index fb1d51b74..97522e2a7 100644
--- a/nomad/state/state_store_test.go
+++ b/nomad/state/state_store_test.go
@@ -2826,6 +2826,229 @@ func TestStateStore_RestoreJobSummary(t *testing.T) {
 	}
 }
 
+// TestStateStore_CSIVolume checks register, list and deregister for csi_volumes
+func TestStateStore_CSIVolume(t *testing.T) {
+	state := testStateStore(t)
+	index := uint64(1000)
+
+	// Volume IDs
+	vol0, vol1 := uuid.Generate(), uuid.Generate()
+
+	// Create a node running a healthy instance of the plugin
+	node := mock.Node()
+	pluginID := "minnie"
+	alloc := mock.Alloc()
+	alloc.DesiredStatus = "run"
+	alloc.ClientStatus = "running"
+	alloc.NodeID = node.ID
+	alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
+		"foo": {
+			Name:   "foo",
+			Source: vol0,
+			Type:   "csi",
+		},
+	}
+
+	node.CSINodePlugins = map[string]*structs.CSIInfo{
+		pluginID: {
+			PluginID:                 pluginID,
+			AllocID:                  alloc.ID,
+			Healthy:                  true,
+			HealthDescription:        "healthy",
+			RequiresControllerPlugin: false,
+			RequiresTopologies:       false,
+			NodeInfo: &structs.CSINodeInfo{
+				ID:                      node.ID,
+				MaxVolumes:              64,
+				RequiresNodeStageVolume: true,
+			},
+		},
+	}
+
+	index++
+	err := state.UpsertNode(index, node)
+	require.NoError(t, err)
+	defer state.DeleteNode(9999, []string{node.ID})
+
+	index++
+	err = state.UpsertAllocs(index, []*structs.Allocation{alloc})
+	require.NoError(t, err)
+
+	ns := structs.DefaultNamespace
+
+	v0 := structs.NewCSIVolume("foo", index)
+	v0.ID = vol0
+	v0.Namespace = ns
+	v0.PluginID = "minnie"
+	v0.Schedulable = true
+	v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
+	v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
+
+	index++
+	v1 := structs.NewCSIVolume("foo", index)
+	v1.ID = vol1
+	v1.Namespace = ns
+	v1.PluginID = "adam"
+	v1.Schedulable = true
+	v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter
+	v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
+
+	index++
+	err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1})
+	require.NoError(t, err)
+
+	ws := memdb.NewWatchSet()
+	iter, err := state.CSIVolumesByNamespace(ws, ns)
+	require.NoError(t, err)
+
+	slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) {
+		for {
+			raw := iter.Next()
+			if raw == nil {
+				break
+			}
+			vol := raw.(*structs.CSIVolume)
+			vs = append(vs, vol)
+		}
+		return vs
+	}
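// ---------------------------------------------------------------------------
// Aside: the "id" index used by CSIVolumeRegister is a compound
// (Namespace, ID) index, so exact lookups pass two arguments, as this test
// exercises through the state store. A runnable, minimal reproduction of that
// schema with a stand-in volume type:
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type vol struct {
	ID        string
	Namespace string
}

func main() {
	schema := &memdb.DBSchema{Tables: map[string]*memdb.TableSchema{
		"csi_volumes": {
			Name: "csi_volumes",
			Indexes: map[string]*memdb.IndexSchema{
				"id": {
					Name:   "id",
					Unique: true,
					Indexer: &memdb.CompoundIndex{Indexes: []memdb.Indexer{
						&memdb.StringFieldIndex{Field: "Namespace"},
						&memdb.StringFieldIndex{Field: "ID"},
					}},
				},
			},
		},
	}}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	txn := db.Txn(true)
	if err := txn.Insert("csi_volumes", &vol{ID: "vol-1", Namespace: "default"}); err != nil {
		panic(err)
	}
	txn.Commit()

	// Namespace first, then ID, matching the indexer order in the schema.
	raw, _ := db.Txn(false).First("csi_volumes", "id", "default", "vol-1")
	fmt.Println(raw.(*vol).ID) // vol-1
}
// ---------------------------------------------------------------------------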
+ + vs := slurp(iter) + require.Equal(t, 2, len(vs)) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNodeID(ws, ns, node.ID) + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + + index++ + err = state.CSIVolumeDeregister(index, ns, []string{ + vol1, + }) + require.NoError(t, err) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByPluginID(ws, ns, "adam") + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 0, len(vs)) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNamespace(ws, ns) + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + + // Claims + a0 := &structs.Allocation{ID: "al"} + a1 := &structs.Allocation{ID: "gator"} + r := structs.CSIVolumeClaimRead + w := structs.CSIVolumeClaimWrite + u := structs.CSIVolumeClaimRelease + + index++ + err = state.CSIVolumeClaim(index, ns, vol0, a0, r) + require.NoError(t, err) + index++ + err = state.CSIVolumeClaim(index, ns, vol0, a1, w) + require.NoError(t, err) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.False(t, vs[0].CanWrite()) + + err = state.CSIVolumeClaim(2, ns, vol0, a0, u) + require.NoError(t, err) + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.True(t, vs[0].CanReadOnly()) +} + +// TestStateStore_CSIPluginNodes uses the state from jobs, and uses node fingerprinting to update health +func TestStateStore_CSIPluginNodes(t *testing.T) { + index := uint64(999) + state := testStateStore(t) + testStateStore_CSIPluginNodes(t, index, state) +} + +func testStateStore_CSIPluginNodes(t *testing.T, index uint64, state *StateStore) (uint64, *StateStore) { + // Create Nodes fingerprinting the plugins + ns := []*structs.Node{mock.Node(), mock.Node(), mock.Node()} + + for _, n := range ns { + index++ + err := state.UpsertNode(index, n) + require.NoError(t, err) + } + + // Fingerprint a running controller plugin + n0 := ns[0].Copy() + n0.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + UpdateTime: time.Now(), + RequiresControllerPlugin: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsListVolumes: true, + }, + }, + } + + index++ + err := state.UpsertNode(index, n0) + require.NoError(t, err) + + // Fingerprint two running node plugins + for _, n := range ns[1:] { + n = n.Copy() + n.CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + UpdateTime: time.Now(), + RequiresControllerPlugin: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + + index++ + err = state.UpsertNode(index, n) + require.NoError(t, err) + } + + ws := memdb.NewWatchSet() + plug, err := state.CSIPluginByID(ws, "foo") + require.NoError(t, err) + + require.Equal(t, "foo", plug.ID) + require.Equal(t, 1, plug.ControllersHealthy) + require.Equal(t, 2, plug.NodesHealthy) + + return index, state +} + +// TestStateStore_CSIPluginBackwards gets the node state first, and the job state second +func TestStateStore_CSIPluginBackwards(t *testing.T) { + index := uint64(999) + state := testStateStore(t) + index, state = testStateStore_CSIPluginNodes(t, index, state) 
+}
+
 func TestStateStore_Indexes(t *testing.T) {
 	t.Parallel()
diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go
new file mode 100644
index 000000000..bbd19b99a
--- /dev/null
+++ b/nomad/structs/csi.go
@@ -0,0 +1,687 @@
+package structs
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// CSISocketName is the filename that Nomad expects plugins to create inside the
+// PluginMountDir.
+const CSISocketName = "csi.sock"
+
+// CSIIntermediaryDirname is the name of the directory inside the PluginMountDir
+// where Nomad will expect plugins to create intermediary mounts for volumes.
+const CSIIntermediaryDirname = "volumes"
+
+// VolumeTypeCSI is the type in the volume stanza of a TaskGroup
+const VolumeTypeCSI = "csi"
+
+// CSIPluginType is an enum string that encapsulates the valid options for a
+// CSIPlugin stanza's Type. These modes will allow the plugin to be used in
+// different ways by the client.
+type CSIPluginType string
+
+const (
+	// CSIPluginTypeNode indicates that Nomad should only use the plugin for
+	// performing Node RPCs against the provided plugin.
+	CSIPluginTypeNode CSIPluginType = "node"
+
+	// CSIPluginTypeController indicates that Nomad should only use the plugin for
+	// performing Controller RPCs against the provided plugin.
+	CSIPluginTypeController CSIPluginType = "controller"
+
+	// CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for
+	// both controller and node RPCs.
+	CSIPluginTypeMonolith CSIPluginType = "monolith"
+)
+
+// CSIPluginTypeIsValid validates the given CSIPluginType string and returns
+// true only when a correct plugin type is specified.
+func CSIPluginTypeIsValid(pt CSIPluginType) bool {
+	switch pt {
+	case CSIPluginTypeNode, CSIPluginTypeController, CSIPluginTypeMonolith:
+		return true
+	default:
+		return false
+	}
+}
+
+// TaskCSIPluginConfig contains the data that is required to setup a task as a
+// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure
+// mounts for the plugin and initiate the connection to the plugin catalog.
+type TaskCSIPluginConfig struct {
+	// ID is the identifier of the plugin.
+	// Ideally this should be the FQDN of the plugin.
+	ID string
+
+	// Type instructs Nomad on how to handle processing a plugin
+	Type CSIPluginType
+
+	// MountDir is the destination that nomad should mount in its CSI
+	// directory for the plugin. It will then expect a file called CSISocketName
+	// to be created by the plugin, and will provide references into
+	// "MountDir/CSIIntermediaryDirname/{VolumeName}/{AllocID}" for mounts.
+	MountDir string
+}
+
+func (t *TaskCSIPluginConfig) Copy() *TaskCSIPluginConfig {
+	if t == nil {
+		return nil
+	}
+
+	nt := new(TaskCSIPluginConfig)
+	*nt = *t
+
+	return nt
+}
+
+// CSIVolumeAttachmentMode chooses the type of storage api that will be used to
+// interact with the device.
+type CSIVolumeAttachmentMode string
+
+const (
+	CSIVolumeAttachmentModeUnknown     CSIVolumeAttachmentMode = ""
+	CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device"
+	CSIVolumeAttachmentModeFilesystem  CSIVolumeAttachmentMode = "file-system"
+)
+
+func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool {
+	switch attachmentMode {
+	case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem:
+		return true
+	default:
+		return false
+	}
+}
+
+// CSIVolumeAccessMode indicates how a volume should be used in a storage topology,
+// e.g. whether the provider should make the volume available concurrently.
+type CSIVolumeAccessMode string
+
+const (
+	CSIVolumeAccessModeUnknown CSIVolumeAccessMode = ""
+
+	CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only"
+	CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer"
+
+	CSIVolumeAccessModeMultiNodeReader       CSIVolumeAccessMode = "multi-node-reader-only"
+	CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer"
+	CSIVolumeAccessModeMultiNodeMultiWriter  CSIVolumeAccessMode = "multi-node-multi-writer"
+)
+
+// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid,
+// non-empty access mode.
+func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool {
+	switch accessMode {
+	case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter,
+		CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter,
+		CSIVolumeAccessModeMultiNodeMultiWriter:
+		return true
+	default:
+		return false
+	}
+}
+
+// ValidCSIVolumeWriteAccessMode checks for a writable access mode
+func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool {
+	switch accessMode {
+	case CSIVolumeAccessModeSingleNodeWriter,
+		CSIVolumeAccessModeMultiNodeSingleWriter,
+		CSIVolumeAccessModeMultiNodeMultiWriter:
+		return true
+	default:
+		return false
+	}
+}
+
+// CSIMountOptions contain optional additional configuration that can be used
+// when specifying that a Volume should be used with VolumeAccessTypeMount.
+type CSIMountOptions struct {
+	// FSType is an optional field that allows an operator to specify the type
+	// of the filesystem.
+	FSType string
+
+	// MountFlags contains additional options that may be used when mounting the
+	// volume by the plugin. This may contain sensitive data and should not be
+	// leaked.
+	MountFlags []string
+}
+
+func (o *CSIMountOptions) Copy() *CSIMountOptions {
+	if o == nil {
+		return nil
+	}
+	no := *o
+	return &no
+}
+
+func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
+	if p == nil {
+		return
+	}
+	if p.FSType != "" {
+		o.FSType = p.FSType
+	}
+	if p.MountFlags != nil {
+		o.MountFlags = p.MountFlags
+	}
+}
+
+// CSIMountOptions implements the Stringer and GoStringer interfaces to prevent
+// accidental leakage of sensitive mount flags via logs.
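// ---------------------------------------------------------------------------
// Aside: a self-contained demonstration of the redaction behavior implemented
// just below. Because the type satisfies fmt.Stringer and fmt.GoStringer,
// format verbs such as %v and %#v print "[REDACTED]" instead of the mount
// flags. The struct and methods are copied locally so the demo runs on its
// own; the flag value is placeholder data.
package main

import "fmt"

type CSIMountOptions struct {
	FSType     string
	MountFlags []string
}

func (v *CSIMountOptions) String() string {
	mountFlagsString := "nil"
	if len(v.MountFlags) != 0 {
		mountFlagsString = "[REDACTED]"
	}
	return fmt.Sprintf("csi.CSIOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString)
}

func (v *CSIMountOptions) GoString() string { return v.String() }

func main() {
	opts := &CSIMountOptions{FSType: "ext4", MountFlags: []string{"password=hunter2"}}
	fmt.Printf("%v\n", opts)  // csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])
	fmt.Printf("%#v\n", opts) // same output via GoString
}
// ---------------------------------------------------------------------------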
+var _ fmt.Stringer = &CSIMountOptions{} +var _ fmt.GoStringer = &CSIMountOptions{} + +func (v *CSIMountOptions) String() string { + mountFlagsString := "nil" + if len(v.MountFlags) != 0 { + mountFlagsString = "[REDACTED]" + } + + return fmt.Sprintf("csi.CSIOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString) +} + +func (v *CSIMountOptions) GoString() string { + return v.String() +} + +// CSIVolume is the full representation of a CSI Volume +type CSIVolume struct { + // ID is a namespace unique URL safe identifier for the volume + ID string + // Name is a display name for the volume, not required to be unique + Name string + // ExternalID identifies the volume for the CSI interface, may be URL unsafe + ExternalID string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions + + // Allocations, tracking claim status + ReadAllocs map[string]*Allocation + WriteAllocs map[string]*Allocation + + // Schedulable is true if all the denormalized plugin health fields are true, and the + // volume has not been marked for garbage collection + Schedulable bool + PluginID string + Provider string + ProviderVersion string + ControllerRequired bool + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CSIVolListStub is partial representation of a CSI Volume for inclusion in lists +type CSIVolListStub struct { + ID string + Namespace string + Name string + ExternalID string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions + CurrentReaders int + CurrentWriters int + Schedulable bool + PluginID string + Provider string + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 +} + +// NewCSIVolume creates the volume struct. 
No side-effects
+func NewCSIVolume(pluginID string, index uint64) *CSIVolume {
+	out := &CSIVolume{
+		ID:          pluginID,
+		CreateIndex: index,
+		ModifyIndex: index,
+	}
+
+	out.newStructs()
+	return out
+}
+
+func (v *CSIVolume) newStructs() {
+	if v.Topologies == nil {
+		v.Topologies = []*CSITopology{}
+	}
+
+	v.ReadAllocs = map[string]*Allocation{}
+	v.WriteAllocs = map[string]*Allocation{}
+}
+
+func (v *CSIVolume) RemoteID() string {
+	if v.ExternalID != "" {
+		return v.ExternalID
+	}
+	return v.ID
+}
+
+func (v *CSIVolume) Stub() *CSIVolListStub {
+	stub := CSIVolListStub{
+		ID:                  v.ID,
+		Namespace:           v.Namespace,
+		Name:                v.Name,
+		ExternalID:          v.ExternalID,
+		Topologies:          v.Topologies,
+		AccessMode:          v.AccessMode,
+		AttachmentMode:      v.AttachmentMode,
+		MountOptions:        v.MountOptions,
+		CurrentReaders:      len(v.ReadAllocs),
+		CurrentWriters:      len(v.WriteAllocs),
+		Schedulable:         v.Schedulable,
+		PluginID:            v.PluginID,
+		Provider:            v.Provider,
+		ControllersHealthy:  v.ControllersHealthy,
+		ControllersExpected: v.ControllersExpected,
+		NodesHealthy:        v.NodesHealthy,
+		NodesExpected:       v.NodesExpected,
+		CreateIndex:         v.CreateIndex,
+		ModifyIndex:         v.ModifyIndex,
+	}
+
+	return &stub
+}
+
+func (v *CSIVolume) CanReadOnly() bool {
+	if !v.Schedulable {
+		return false
+	}
+
+	return v.ResourceExhausted == time.Time{}
+}
+
+func (v *CSIVolume) CanWrite() bool {
+	if !v.Schedulable {
+		return false
+	}
+
+	switch v.AccessMode {
+	case CSIVolumeAccessModeSingleNodeWriter, CSIVolumeAccessModeMultiNodeSingleWriter:
+		return len(v.WriteAllocs) == 0
+	case CSIVolumeAccessModeMultiNodeMultiWriter:
+		return v.ResourceExhausted == time.Time{}
+	default:
+		return false
+	}
+}
+
+// Copy returns a copy of the volume, which shares only the Topologies slice
+func (v *CSIVolume) Copy() *CSIVolume {
+	copy := *v
+	out := &copy
+	out.newStructs()
+
+	for k, v := range v.ReadAllocs {
+		out.ReadAllocs[k] = v
+	}
+
+	for k, v := range v.WriteAllocs {
+		out.WriteAllocs[k] = v
+	}
+
+	return out
+}
+
+// Claim updates the allocations and changes the volume state
+func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool {
+	switch claim {
+	case CSIVolumeClaimRead:
+		return v.ClaimRead(alloc)
+	case CSIVolumeClaimWrite:
+		return v.ClaimWrite(alloc)
+	case CSIVolumeClaimRelease:
+		return v.ClaimRelease(alloc)
+	}
+	return false
+}
+
+// ClaimRead marks an allocation as using a volume read-only
+func (v *CSIVolume) ClaimRead(alloc *Allocation) bool {
+	if _, ok := v.ReadAllocs[alloc.ID]; ok {
+		return true
+	}
+
+	if !v.CanReadOnly() {
+		return false
+	}
+	// Allocations are copy on write, so we want to keep the id but don't need the
+	// pointer. We'll get it from the db in denormalize.
+	v.ReadAllocs[alloc.ID] = nil
+	delete(v.WriteAllocs, alloc.ID)
+	return true
+}
+
+// ClaimWrite marks an allocation as using a volume as a writer
+func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool {
+	if _, ok := v.WriteAllocs[alloc.ID]; ok {
+		return true
+	}
+
+	if !v.CanWrite() {
+		return false
+	}
+	// Allocations are copy on write, so we want to keep the id but don't need the
+	// pointer. We'll get it from the db in denormalize.
+	v.WriteAllocs[alloc.ID] = nil
+	delete(v.ReadAllocs, alloc.ID)
+	return true
+}
+
+// ClaimRelease is called when the allocation has terminated and already stopped using the volume
+func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool {
+	delete(v.ReadAllocs, alloc.ID)
+	delete(v.WriteAllocs, alloc.ID)
+	return true
+}
+
+// Equal checks equality by value
+func (v *CSIVolume) Equal(o *CSIVolume) bool {
+	if v == nil || o == nil {
+		return v == o
+	}
+
+	// Omit the plugin health fields, their values are controlled by plugin jobs
+	if v.ID == o.ID &&
+		v.Namespace == o.Namespace &&
+		v.AccessMode == o.AccessMode &&
+		v.AttachmentMode == o.AttachmentMode &&
+		v.PluginID == o.PluginID {
+		// Setwise equality of topologies
+		var ok bool
+		for _, t := range v.Topologies {
+			ok = false
+			for _, u := range o.Topologies {
+				if t.Equal(u) {
+					ok = true
+					break
+				}
+			}
+			if !ok {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// Validate validates the volume struct, returning all validation errors at once
+func (v *CSIVolume) Validate() error {
+	errs := []string{}
+
+	if v.ID == "" {
+		errs = append(errs, "missing volume id")
+	}
+	if v.PluginID == "" {
+		errs = append(errs, "missing plugin id")
+	}
+	if v.Namespace == "" {
+		errs = append(errs, "missing namespace")
+	}
+	if v.AccessMode == "" {
+		errs = append(errs, "missing access mode")
+	}
+	if v.AttachmentMode == "" {
+		errs = append(errs, "missing attachment mode")
+	}
+
+	// TODO: Volume Topologies are optional - We should check to see if the plugin
+	// the volume is being registered with requires them.
+	// var ok bool
+	// for _, t := range v.Topologies {
+	// 	if t != nil && len(t.Segments) > 0 {
+	// 		ok = true
+	// 		break
+	// 	}
+	// }
+	// if !ok {
+	// 	errs = append(errs, "missing topology")
+	// }
+
+	if len(errs) > 0 {
+		return fmt.Errorf("validation: %s", strings.Join(errs, ", "))
+	}
+	return nil
+}
+
+// Request and response wrappers
+type CSIVolumeRegisterRequest struct {
+	Volumes []*CSIVolume
+	WriteRequest
+}
+
+type CSIVolumeRegisterResponse struct {
+	QueryMeta
+}
+
+type CSIVolumeDeregisterRequest struct {
+	VolumeIDs []string
+	WriteRequest
+}
+
+type CSIVolumeDeregisterResponse struct {
+	QueryMeta
+}
+
+type CSIVolumeClaimMode int
+
+const (
+	CSIVolumeClaimRead CSIVolumeClaimMode = iota
+	CSIVolumeClaimWrite
+	CSIVolumeClaimRelease
+)
+
+type CSIVolumeClaimRequest struct {
+	VolumeID     string
+	AllocationID string
+	Claim        CSIVolumeClaimMode
+	WriteRequest
+}
+
+type CSIVolumeClaimResponse struct {
+	// Opaque static publish properties of the volume. SP MAY use this
+	// field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+	// calls have contextual information.
+	// The contents of this field SHALL be opaque to nomad.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the nomad to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// This field is OPTIONAL and when present MUST be passed to
+	// `NodeStageVolume` or `NodePublishVolume` calls on the client
+	PublishContext map[string]string
+
+	// Volume contains the expanded CSIVolume for use on the client after a Claim
+	// has completed.
+	Volume *CSIVolume
+
+	QueryMeta
+}
+
+type CSIVolumeListRequest struct {
+	PluginID string
+	NodeID   string
+	QueryOptions
+}
+
+type CSIVolumeListResponse struct {
+	Volumes []*CSIVolListStub
+	QueryMeta
+}
+
+type CSIVolumeGetRequest struct {
+	ID string
+	QueryOptions
+}
+
+type CSIVolumeGetResponse struct {
+	Volume *CSIVolume
+	QueryMeta
+}
+
+// CSIPlugin collects fingerprint info context for the plugin for clients
+type CSIPlugin struct {
+	ID                 string
+	Provider           string // the vendor name from CSI GetPluginInfoResponse
+	Version            string // the vendor version from CSI GetPluginInfoResponse
+	ControllerRequired bool
+
+	// Map Node.IDs to fingerprint results, split by type. Monolith type plugins have
+	// both sets of fingerprinting results.
+	Controllers map[string]*CSIInfo
+	Nodes       map[string]*CSIInfo
+
+	// Allocations are populated by denormalize to show running allocations
+	Allocations []*AllocListStub
+
+	// Cache the count of healthy plugins
+	ControllersHealthy int
+	NodesHealthy       int
+
+	CreateIndex uint64
+	ModifyIndex uint64
+}
+
+// NewCSIPlugin creates the plugin struct. No side-effects
+func NewCSIPlugin(id string, index uint64) *CSIPlugin {
+	out := &CSIPlugin{
+		ID:          id,
+		CreateIndex: index,
+		ModifyIndex: index,
+	}
+
+	out.newStructs()
+	return out
+}
+
+func (p *CSIPlugin) newStructs() {
+	p.Controllers = map[string]*CSIInfo{}
+	p.Nodes = map[string]*CSIInfo{}
+}
+
+func (p *CSIPlugin) Copy() *CSIPlugin {
+	copy := *p
+	out := &copy
+	out.newStructs()
+
+	for k, v := range p.Controllers {
+		out.Controllers[k] = v
+	}
+
+	for k, v := range p.Nodes {
+		out.Nodes[k] = v
+	}
+
+	return out
+}
+
+// AddPlugin adds a single plugin running on the node. Called from state.NodeUpdate in a
+// transaction
+func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo) {
+	if info.ControllerInfo != nil {
+		p.ControllerRequired = info.RequiresControllerPlugin &&
+			info.ControllerInfo.SupportsAttachDetach
+
+		prev, ok := p.Controllers[nodeID]
+		if ok && prev.Healthy {
+			p.ControllersHealthy -= 1
+		}
+		p.Controllers[nodeID] = info
+		if info.Healthy {
+			p.ControllersHealthy += 1
+		}
+	}
+
+	if info.NodeInfo != nil {
+		prev, ok := p.Nodes[nodeID]
+		if ok && prev.Healthy {
+			p.NodesHealthy -= 1
+		}
+		p.Nodes[nodeID] = info
+		if info.Healthy {
+			p.NodesHealthy += 1
+		}
+	}
+}
+
+// DeleteNode removes all plugins from the node.
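// ---------------------------------------------------------------------------
// Aside: a minimal model of the healthy-count bookkeeping in AddPlugin and
// DeleteNode. Each re-fingerprint first backs out the node's previous healthy
// contribution and then applies the new one, so the counter stays correct
// under repeated updates from the same node. Trimmed-down stand-in types;
// runnable on its own.
package main

import "fmt"

type info struct{ Healthy bool }

type plugin struct {
	Nodes        map[string]*info
	NodesHealthy int
}

func (p *plugin) addNode(nodeID string, in *info) {
	if prev, ok := p.Nodes[nodeID]; ok && prev.Healthy {
		p.NodesHealthy--
	}
	p.Nodes[nodeID] = in
	if in.Healthy {
		p.NodesHealthy++
	}
}

func main() {
	p := &plugin{Nodes: map[string]*info{}}
	p.addNode("n1", &info{Healthy: true})
	p.addNode("n1", &info{Healthy: true}) // re-fingerprint of the same node
	fmt.Println(p.NodesHealthy)           // 1, not 2
	p.addNode("n1", &info{Healthy: false})
	fmt.Println(p.NodesHealthy) // 0
}
// ---------------------------------------------------------------------------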
Called from state.DeleteNode in a +// transaction +func (p *CSIPlugin) DeleteNode(nodeID string) { + prev, ok := p.Controllers[nodeID] + if ok && prev.Healthy { + p.ControllersHealthy -= 1 + } + delete(p.Controllers, nodeID) + + prev, ok = p.Nodes[nodeID] + if ok && prev.Healthy { + p.NodesHealthy -= 1 + } + delete(p.Nodes, nodeID) +} + +type CSIPluginListStub struct { + ID string + Provider string + ControllerRequired bool + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *CSIPlugin) Stub() *CSIPluginListStub { + return &CSIPluginListStub{ + ID: p.ID, + Provider: p.Provider, + ControllerRequired: p.ControllerRequired, + ControllersHealthy: p.ControllersHealthy, + ControllersExpected: len(p.Controllers), + NodesHealthy: p.NodesHealthy, + NodesExpected: len(p.Nodes), + CreateIndex: p.CreateIndex, + ModifyIndex: p.ModifyIndex, + } +} + +func (p *CSIPlugin) IsEmpty() bool { + return len(p.Controllers) == 0 && len(p.Nodes) == 0 +} + +type CSIPluginListRequest struct { + QueryOptions +} + +type CSIPluginListResponse struct { + Plugins []*CSIPluginListStub + QueryMeta +} + +type CSIPluginGetRequest struct { + ID string + QueryOptions +} + +type CSIPluginGetResponse struct { + Plugin *CSIPlugin + QueryMeta +} diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go new file mode 100644 index 000000000..2978baf49 --- /dev/null +++ b/nomad/structs/csi_test.go @@ -0,0 +1,30 @@ +package structs + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCSIVolumeClaim(t *testing.T) { + vol := NewCSIVolume("", 0) + vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter + vol.Schedulable = true + + alloc := &Allocation{ID: "a1"} + alloc2 := &Allocation{ID: "a2"} + + vol.ClaimRead(alloc) + require.True(t, vol.CanReadOnly()) + require.True(t, vol.CanWrite()) + require.True(t, vol.ClaimRead(alloc)) + + vol.ClaimWrite(alloc) + require.True(t, vol.CanReadOnly()) + require.False(t, vol.CanWrite()) + require.False(t, vol.ClaimWrite(alloc2)) + + vol.ClaimRelease(alloc) + require.True(t, vol.CanReadOnly()) + require.True(t, vol.CanWrite()) +} diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 76758fb8e..b8d4ea5f9 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -1,11 +1,223 @@ package structs import ( + "reflect" "time" "github.com/hashicorp/nomad/helper" ) +// CSITopology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// +// According to CSI, there are a few requirements for the keys within this map: +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). 
+// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +// +// However, Nomad applies lighter restrictions to these, as they are already +// only referenced by plugin within the scheduler and as such collisions and +// related concerns are less of an issue. We may implement these restrictions +// in the future. +type CSITopology struct { + Segments map[string]string +} + +func (t *CSITopology) Copy() *CSITopology { + if t == nil { + return nil + } + + return &CSITopology{ + Segments: helper.CopyMapStringString(t.Segments), + } +} + +func (t *CSITopology) Equal(o *CSITopology) bool { + if t == nil || o == nil { + return t == o + } + + return helper.CompareMapStringString(t.Segments, o.Segments) +} + +// CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Node API. +type CSINodeInfo struct { + // ID is the identity of a given nomad client as observed by the storage + // provider. + ID string + + // MaxVolumes is the maximum number of volumes that can be attached to the + // current host via this provider. + // If 0 then unlimited volumes may be attached. + MaxVolumes int64 + + // AccessibleTopology specifies where (regions, zones, racks, etc.) the node is + // accessible from within the storage provider. + // + // A plugin that returns this field MUST also set the `RequiresTopologies` + // property. + // + // This field is OPTIONAL. If it is not specified, then we assume that + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2" within the storage provider. + AccessibleTopology *CSITopology + + // RequiresNodeStageVolume indicates whether the client should Stage/Unstage + // volumes on this node. + RequiresNodeStageVolume bool +} + +func (n *CSINodeInfo) Copy() *CSINodeInfo { + if n == nil { + return nil + } + + nc := new(CSINodeInfo) + *nc = *n + nc.AccessibleTopology = n.AccessibleTopology.Copy() + + return nc +} + +// CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Controller API. +type CSIControllerInfo struct { + // SupportsReadOnlyAttach is set to true when the controller returns the + // ATTACH_READONLY capability. + SupportsReadOnlyAttach bool + + // SupportsAttachDetach is true when the controller implements the methods + // required to attach and detach volumes. If this is false Nomad should skip + // the controller attachment flow. + SupportsAttachDetach bool + + // SupportsListVolumes is true when the controller implements the ListVolumes + // RPC. NOTE: This does not guarantee that attached nodes will be returned + // unless SupportsListVolumesAttachedNodes is also true.
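+ // + // A hedged sketch of how a caller might gate on these two flags + // (illustrative only, not code from this change): + // + // if info.ControllerInfo.SupportsListVolumes { + // // the ListVolumes RPC may be called + // if !info.ControllerInfo.SupportsListVolumesAttachedNodes { + // // responses may omit attached-node data + // } + // } + //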
+ SupportsListVolumes bool + + // SupportsListVolumesAttachedNodes indicates whether the plugin will return + // attached nodes data when making ListVolumes RPCs + SupportsListVolumesAttachedNodes bool +} + +func (c *CSIControllerInfo) Copy() *CSIControllerInfo { + if c == nil { + return nil + } + + nc := new(CSIControllerInfo) + *nc = *c + + return nc +} + +// CSIInfo is the current state of a single CSI Plugin. This is updated regularly +// as plugin health changes on the node. +type CSIInfo struct { + PluginID string + AllocID string + Healthy bool + HealthDescription string + UpdateTime time.Time + + Provider string // vendor name from CSI GetPluginInfoResponse + ProviderVersion string // vendor version from CSI GetPluginInfoResponse + + // RequiresControllerPlugin is set when the CSI Plugin returns the + // CONTROLLER_SERVICE capability. When this is true, the volumes should not be + // scheduled on this client until a matching controller plugin is available. + RequiresControllerPlugin bool + + // RequiresTopologies is set when the CSI Plugin returns the + // VOLUME_ACCESSIBILITY_CONSTRAINTS capability. When this is true, we must + // respect the Volume and Node Topology information. + RequiresTopologies bool + + // CSI Specific metadata + ControllerInfo *CSIControllerInfo `json:",omitempty"` + NodeInfo *CSINodeInfo `json:",omitempty"` +} + +func (c *CSIInfo) Copy() *CSIInfo { + if c == nil { + return nil + } + + nc := new(CSIInfo) + *nc = *c + nc.ControllerInfo = c.ControllerInfo.Copy() + nc.NodeInfo = c.NodeInfo.Copy() + + return nc +} + +func (c *CSIInfo) SetHealthy(hs bool) { + c.Healthy = hs + if hs { + c.HealthDescription = "healthy" + } else { + c.HealthDescription = "unhealthy" + } +} + +func (c *CSIInfo) Equal(o *CSIInfo) bool { + // Return early when either side is nil, which also guards the + // dereferences below + if c == nil || o == nil { + return c == o + } + + nc := *c + nc.UpdateTime = time.Time{} + no := *o + no.UpdateTime = time.Time{} + + return reflect.DeepEqual(nc, no) +} + +func (c *CSIInfo) IsController() bool { + if c == nil || c.ControllerInfo == nil { + return false + } + return true +} + +func (c *CSIInfo) IsNode() bool { + if c == nil || c.NodeInfo == nil { + return false + } + return true +} + // DriverInfo is the current state of a single driver. This is updated // regularly as driver health changes on the node.
type DriverInfo struct { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 28942c1b7..3c1fa2b6c 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -86,6 +86,9 @@ const ( ClusterMetadataRequestType ServiceIdentityAccessorRegisterRequestType ServiceIdentityAccessorDeregisterRequestType + CSIVolumeRegisterRequestType + CSIVolumeDeregisterRequestType + CSIVolumeClaimRequestType ) const ( @@ -160,6 +163,8 @@ const ( Namespaces Context = "namespaces" Quotas Context = "quotas" All Context = "all" + Plugins Context = "plugins" + Volumes Context = "volumes" ) // NamespacedID is a tuple of an ID and a namespace @@ -1659,6 +1664,11 @@ type Node struct { // Drivers is a map of driver names to current driver information Drivers map[string]*DriverInfo + // CSIControllerPlugins is a map of plugin names to current CSI Plugin info + CSIControllerPlugins map[string]*CSIInfo + // CSINodePlugins is a map of plugin names to current CSI Plugin info + CSINodePlugins map[string]*CSIInfo + // HostVolumes is a map of host volume names to their configuration HostVolumes map[string]*ClientHostVolumeConfig @@ -1705,6 +1715,8 @@ func (n *Node) Copy() *Node { nn.Meta = helper.CopyMapStringString(nn.Meta) nn.Events = copyNodeEvents(n.Events) nn.DrainStrategy = nn.DrainStrategy.Copy() + nn.CSIControllerPlugins = copyNodeCSI(nn.CSIControllerPlugins) + nn.CSINodePlugins = copyNodeCSI(nn.CSINodePlugins) nn.Drivers = copyNodeDrivers(n.Drivers) nn.HostVolumes = copyNodeHostVolumes(n.HostVolumes) return nn @@ -1724,6 +1736,21 @@ func copyNodeEvents(events []*NodeEvent) []*NodeEvent { return c } +// copyNodeCSI is a helper to copy a map of CSIInfo +func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo { + l := len(plugins) + if l == 0 { + return nil + } + + c := make(map[string]*CSIInfo, l) + for plugin, info := range plugins { + c[plugin] = info.Copy() + } + + return c +} + // copyNodeDrivers is a helper to copy a map of DriverInfo func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo { l := len(drivers) @@ -5154,8 +5181,8 @@ func (tg *TaskGroup) Validate(j *Job) error { // Validate the Host Volumes for name, decl := range tg.Volumes { - if decl.Type != VolumeTypeHost { - // TODO: Remove this error when adding new volume types + if !(decl.Type == VolumeTypeHost || + decl.Type == VolumeTypeCSI) { mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume %s has unrecognised type %s", name, decl.Type)) continue } @@ -5556,6 +5583,9 @@ type Task struct { // Used internally to manage tasks according to their TaskKind. Initial use case // is for Consul Connect Kind TaskKind + + // CSIPluginConfig is used to configure the plugin supervisor for the task. 
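+ // + // A hedged example of the jobspec stanza this field is decoded from; the + // stanza name and keys are assumptions inferred from TaskCSIPluginConfig, + // since the HCL parsing is not part of this diff: + // + // csi_plugin { + // id = "com.hashicorp.csi" + // type = "monolith" + // mount_dir = "/csi" + // } + //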
+ CSIPluginConfig *TaskCSIPluginConfig } // UsesConnect is for conveniently detecting if the Task is able to make use @@ -5593,6 +5623,7 @@ func (t *Task) Copy() *Task { nt.Constraints = CopySliceConstraints(nt.Constraints) nt.Affinities = CopySliceAffinities(nt.Affinities) nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts) + nt.CSIPluginConfig = nt.CSIPluginConfig.Copy() nt.Vault = nt.Vault.Copy() nt.Resources = nt.Resources.Copy() @@ -5811,6 +5842,19 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices } } + + // Validate CSI Plugin Config + if t.CSIPluginConfig != nil { + if t.CSIPluginConfig.ID == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID")) + } + + if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) { + mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type)) + } + + // TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :( + } + return mErr.ErrorOrNil() } @@ -6336,6 +6380,12 @@ const ( // TaskRestoreFailed indicates Nomad was unable to reattach to a // restored task. TaskRestoreFailed = "Failed Restoring Task" + + // TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy + TaskPluginUnhealthy = "Plugin became unhealthy" + + // TaskPluginHealthy indicates that a plugin managed by Nomad became healthy + TaskPluginHealthy = "Plugin became healthy" ) // TaskEvent is an event that effects the state of a task and contains meta-data @@ -8646,6 +8696,11 @@ const ( // check if they are terminal. If so, we delete these out of the system. CoreJobDeploymentGC = "deployment-gc" + // CoreJobCSIVolumeClaimGC is used for the garbage collection of CSI + // volume claims. We periodically scan volumes to see if no allocs are + // claiming them. If so, we unclaim the volume. + CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc" + + // CoreJobForceGC is used to force garbage collection of all GCable objects.
CoreJobForceGC = "force-gc" ) diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index f43ebf527..cba53774d 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -1781,6 +1781,55 @@ func TestTask_Validate_LogConfig(t *testing.T) { } } +func TestTask_Validate_CSIPluginConfig(t *testing.T) { + table := []struct { + name string + pc *TaskCSIPluginConfig + expectedErr string + }{ + { + name: "no errors when not specified", + pc: nil, + }, + { + name: "requires non-empty plugin id", + pc: &TaskCSIPluginConfig{}, + expectedErr: "CSIPluginConfig must have a non-empty PluginID", + }, + { + name: "requires valid plugin type", + pc: &TaskCSIPluginConfig{ + ID: "com.hashicorp.csi", + Type: "nonsense", + }, + expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"", + }, + } + + for _, tt := range table { + t.Run(tt.name, func(t *testing.T) { + task := &Task{ + CSIPluginConfig: tt.pc, + } + ephemeralDisk := &EphemeralDisk{ + SizeMB: 1, + } + + err := task.Validate(ephemeralDisk, JobTypeService, nil) + mErr := err.(*multierror.Error) + if tt.expectedErr != "" { + if !strings.Contains(mErr.Errors[4].Error(), tt.expectedErr) { + t.Fatalf("err: %s", err) + } + } else { + if len(mErr.Errors) != 4 { + t.Fatalf("unexpected err: %s", mErr.Errors[4]) + } + } + }) + } +} + func TestTask_Validate_Template(t *testing.T) { bad := &Template{} diff --git a/nomad/structs/volumes.go b/nomad/structs/volumes.go index fe44e4830..e29d1c42b 100644 --- a/nomad/structs/volumes.go +++ b/nomad/structs/volumes.go @@ -86,10 +86,11 @@ func HostVolumeSliceMerge(a, b []*ClientHostVolumeConfig) []*ClientHostVolumeCon // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. type VolumeRequest struct { - Name string - Type string - Source string - ReadOnly bool + Name string + Type string + Source string + ReadOnly bool + MountOptions *CSIMountOptions } func (v *VolumeRequest) Copy() *VolumeRequest { @@ -99,6 +100,12 @@ func (v *VolumeRequest) Copy() *VolumeRequest { nv := new(VolumeRequest) *nv = *v + if v.MountOptions == nil { + return nv + } + + // Deep copy the mount options; &(*v.MountOptions) would alias the original struct + mo := *v.MountOptions + nv.MountOptions = &mo + return nv } diff --git a/nomad/testing.go b/nomad/testing.go index 3beeeb370..9d63bbf6f 100644 --- a/nomad/testing.go +++ b/nomad/testing.go @@ -15,7 +15,9 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/catalog" "github.com/hashicorp/nomad/helper/pluginutils/singleton" "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/version" ) @@ -154,3 +156,70 @@ func TestJoin(t testing.T, s1 *Server, other ...*Server) { } } } + +// CreateTestCSIPlugin is a helper that generates the node + fingerprint results necessary to +// create a CSIPlugin by directly inserting into the state store.
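+// +// A hedged usage sketch (obtaining the *state.StateStore is test wiring +// outside this diff): +// +// cleanup := CreateTestCSIPlugin(store, "com.hashicorp.csi") +// defer cleanup() +//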
It's exported for use in +// other test packages +func CreateTestCSIPlugin(s *state.StateStore, id string) func() { + // Create some nodes + ns := make([]*structs.Node, 3) + for i := range ns { + n := mock.Node() + ns[i] = n + } + + // Install healthy controller plugin fingerprinting results + ns[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: uuid.Generate(), + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsAttachDetach: true, + SupportsListVolumes: true, + SupportsListVolumesAttachedNodes: false, + }, + }, + } + + // Install healthy node plugin fingerprinting results + for _, n := range ns[1:] { + n.CSINodePlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: uuid.Generate(), + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: n.ID, + MaxVolumes: 64, + RequiresNodeStageVolume: true, + }, + }, + } + } + + // Insert them into the state store + index := uint64(999) + for _, n := range ns { + index++ + s.UpsertNode(index, n) + } + + // Return cleanup function that deletes the nodes + return func() { + ids := make([]string, len(ns)) + for i, n := range ns { + ids[i] = n.ID + } + + index++ + s.DeleteNode(index, ids) + } +} diff --git a/plugins/csi/client.go b/plugins/csi/client.go new file mode 100644 index 000000000..d558b0331 --- /dev/null +++ b/plugins/csi/client.go @@ -0,0 +1,465 @@ +package csi + +import ( + "context" + "fmt" + "net" + "time" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/grpc-middleware/logging" + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + "google.golang.org/grpc" +) + +// PluginTypeCSI implements the CSI plugin interface +const PluginTypeCSI = "csi" + +type NodeGetInfoResponse struct { + NodeID string + MaxVolumes int64 + AccessibleTopology *Topology +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// +// According to CSI, there are a few requirements for the keys within this map: +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive.
Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string +} + +// CSIControllerClient defines the minimal CSI Controller Plugin interface used +// by nomad to simplify the interface required for testing. +type CSIControllerClient interface { + ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) + ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) +} + +// CSINodeClient defines the minimal CSI Node Plugin interface used +// by nomad to simplify the interface required for testing. +type CSINodeClient interface { + NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) + NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) +} + +type client struct { + conn *grpc.ClientConn + identityClient csipbv1.IdentityClient + controllerClient CSIControllerClient + nodeClient CSINodeClient +} + +func (c *client) Close() error { + if c.conn != nil { + return c.conn.Close() + } + return nil +} + +func NewClient(addr string, logger hclog.Logger) (CSIPlugin, error) { + if addr == "" { + return nil, fmt.Errorf("address is empty") + } + + conn, err := newGrpcConn(addr, logger) + if err != nil { + return nil, err + } + + return &client{ + conn: conn, + identityClient: csipbv1.NewIdentityClient(conn), + controllerClient: csipbv1.NewControllerClient(conn), + nodeClient: csipbv1.NewNodeClient(conn), + }, nil +} + +func newGrpcConn(addr string, logger hclog.Logger) (*grpc.ClientConn, error) { + conn, err := grpc.Dial( + addr, + grpc.WithInsecure(), + grpc.WithUnaryInterceptor(logging.UnaryClientInterceptor(logger)), + grpc.WithStreamInterceptor(logging.StreamClientInterceptor(logger)), + grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", target, timeout) + }), + ) + + if err != nil { + return nil, fmt.Errorf("failed to open grpc connection to addr: %s, err: %v", addr, err) + } + + return conn, nil +} + +// PluginInfo describes the type and version of a plugin as 
required by the nomad +// base.BasePlugin interface. +func (c *client) PluginInfo() (*base.PluginInfoResponse, error) { + name, version, err := c.PluginGetInfo(context.TODO()) + if err != nil { + return nil, err + } + + return &base.PluginInfoResponse{ + Type: PluginTypeCSI, // note: this isn't a Nomad go-plugin type + PluginApiVersions: []string{"1.0.0"}, // TODO(tgross): we want to fingerprint spec version, but this isn't included as a field from the plugins + PluginVersion: version, + Name: name, + }, nil +} + +// ConfigSchema returns the schema for parsing the plugin's configuration as +// required by the base.BasePlugin interface. It will always return nil. +func (c *client) ConfigSchema() (*hclspec.Spec, error) { + return nil, nil +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. +func (c *client) SetConfig(_ *base.Config) error { + return fmt.Errorf("unsupported") +} + +func (c *client) PluginProbe(ctx context.Context) (bool, error) { + resp, err := c.identityClient.Probe(ctx, &csipbv1.ProbeRequest{}) + if err != nil { + return false, err + } + + wrapper := resp.GetReady() + + // wrapper.GetValue() protects against wrapper being `nil`, and returns false. + ready := wrapper.GetValue() + + if wrapper == nil { + // If the plugin returns a nil value for ready, then it should be + // interpreted as the plugin is ready for compatibility with plugins that + // do not do health checks. + ready = true + } + + return ready, nil +} + +func (c *client) PluginGetInfo(ctx context.Context) (string, string, error) { + if c == nil { + return "", "", fmt.Errorf("Client not initialized") + } + if c.identityClient == nil { + return "", "", fmt.Errorf("Client not initialized") + } + + resp, err := c.identityClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{}) + if err != nil { + return "", "", err + } + + name := resp.GetName() + if name == "" { + return "", "", fmt.Errorf("PluginGetInfo: plugin returned empty name field") + } + version := resp.GetVendorVersion() + + return name, version, nil +} + +func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.identityClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + resp, err := c.identityClient.GetPluginCapabilities(ctx, &csipbv1.GetPluginCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewPluginCapabilitySet(resp), nil +} + +// +// Controller Endpoints +// + +func (c *client) ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + + resp, err := c.controllerClient.ControllerGetCapabilities(ctx, &csipbv1.ControllerGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewControllerCapabilitySet(resp), nil +} + +func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + + err := req.Validate() + if err != nil { + return nil, err + } + + pbrequest := req.ToCSIRepresentation() + resp, err := c.controllerClient.ControllerPublishVolume(ctx, pbrequest) + if err != nil { + return nil, err
+ } + + return &ControllerPublishVolumeResponse{ + PublishContext: helper.CopyMapStringString(resp.PublishContext), + }, nil +} + +func (c *client) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + err := req.Validate() + if err != nil { + return nil, err + } + + upbrequest := req.ToCSIRepresentation() + _, err = c.controllerClient.ControllerUnpublishVolume(ctx, upbrequest) + if err != nil { + return nil, err + } + + return &ControllerUnpublishVolumeResponse{}, nil +} + +func (c *client) ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *VolumeCapability) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return fmt.Errorf("controllerClient not initialized") + } + + if volumeID == "" { + return fmt.Errorf("missing VolumeID") + } + + if capabilities == nil { + return fmt.Errorf("missing Capabilities") + } + + req := &csipbv1.ValidateVolumeCapabilitiesRequest{ + VolumeId: volumeID, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + capabilities.ToCSIRepresentation(), + }, + } + + resp, err := c.controllerClient.ValidateVolumeCapabilities(ctx, req) + if err != nil { + return err + } + + if resp.Confirmed == nil { + if resp.Message != "" { + return fmt.Errorf("Volume validation failed, message: %s", resp.Message) + } + + return fmt.Errorf("Volume validation failed") + } + + return nil +} + +// +// Node Endpoints +// + +func (c *client) NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + resp, err := c.nodeClient.NodeGetCapabilities(ctx, &csipbv1.NodeGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewNodeCapabilitySet(resp), nil +} + +func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + result := &NodeGetInfoResponse{} + + resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{}) + if err != nil { + return nil, err + } + + if resp.GetNodeId() == "" { + return nil, fmt.Errorf("plugin failed to return nodeid") + } + + result.NodeID = resp.GetNodeId() + result.MaxVolumes = resp.GetMaxVolumesPerNode() + + return result, nil +} + +func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability, opts ...grpc.CallOption) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + // These errors should not be returned during production use but exist as aids + // during Nomad Development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if stagingTargetPath == "" { + return fmt.Errorf("missing stagingTargetPath") + } + + req := &csipbv1.NodeStageVolumeRequest{ + VolumeId: volumeID, + PublishContext: publishContext, + StagingTargetPath: stagingTargetPath, + VolumeCapability: capabilities.ToCSIRepresentation(), + } + + // NodeStageVolume's response contains no 
extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeStageVolume(ctx, req, opts...) + return err +} + +func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + // These errors should not be returned during production use but exist as aids + // during Nomad Development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if stagingTargetPath == "" { + return fmt.Errorf("missing stagingTargetPath") + } + + req := &csipbv1.NodeUnstageVolumeRequest{ + VolumeId: volumeID, + StagingTargetPath: stagingTargetPath, + } + + // NodeUnstageVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeUnstageVolume(ctx, req, opts...) + return err +} + +func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + if err := req.Validate(); err != nil { + return fmt.Errorf("validation error: %v", err) + } + + // NodePublishVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation(), opts...) + return err +} + +func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + if volumeID == "" { + return fmt.Errorf("missing VolumeID") + } + + if targetPath == "" { + return fmt.Errorf("missing TargetPath") + } + + req := &csipbv1.NodeUnpublishVolumeRequest{ + VolumeId: volumeID, + TargetPath: targetPath, + } + + // NodeUnpublishVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeUnpublishVolume(ctx, req, opts...) 
+ return err +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go new file mode 100644 index 000000000..ff8f6f2ff --- /dev/null +++ b/plugins/csi/client_test.go @@ -0,0 +1,657 @@ +package csi + +import ( + "context" + "errors" + "fmt" + "testing" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" + fake "github.com/hashicorp/nomad/plugins/csi/testing" + "github.com/stretchr/testify/require" +) + +func newTestClient() (*fake.IdentityClient, *fake.ControllerClient, *fake.NodeClient, CSIPlugin) { + ic := fake.NewIdentityClient() + cc := fake.NewControllerClient() + nc := fake.NewNodeClient() + client := &client{ + identityClient: ic, + controllerClient: cc, + nodeClient: nc, + } + + return ic, cc, nc, client +} + +func TestClient_RPC_PluginProbe(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + ProbeResponse *csipbv1.ProbeResponse + ExpectedResponse bool + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns false for ready when the provider returns false", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: false}, + }, + ExpectedResponse: false, + }, + { + Name: "returns true for ready when the provider returns true", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, + ExpectedResponse: true, + }, + { + /* When a SP does not return a ready value, a CO MAY treat this as ready. + We do so because example plugins rely on this behaviour. We may + re-evaluate this decision in the future. */ + Name: "returns true for ready when the provider returns a nil wrapper", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: nil, + }, + ExpectedResponse: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, _, _, client := newTestClient() + defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginProbe = c.ProbeResponse + + resp, err := client.PluginProbe(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } + +} + +func TestClient_RPC_PluginInfo(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + InfoResponse *csipbv1.GetPluginInfoResponse + ExpectedResponseName string + ExpectedResponseVersion string + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns an error if we receive an empty `name`", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "", + VendorVersion: "", + }, + ExpectedErr: fmt.Errorf("PluginGetInfo: plugin returned empty name field"), + }, + { + Name: "returns the name when successfully retrieved and not empty", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "com.hashicorp.storage", + VendorVersion: "1.0.1", + }, + ExpectedResponseName: "com.hashicorp.storage", + ExpectedResponseVersion: "1.0.1", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, _, _, client := newTestClient() + defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginInfo = c.InfoResponse + + name, version, err := client.PluginGetInfo(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponseName, name) + 
require.Equal(t, c.ExpectedResponseVersion, version) + }) + } + +} + +func TestClient_RPC_PluginGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.GetPluginCapabilitiesResponse + ExpectedResponse *PluginCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "HasControllerService is true when it's part of the response", + Response: &csipbv1.GetPluginCapabilitiesResponse{ + Capabilities: []*csipbv1.PluginCapability{ + { + Type: &csipbv1.PluginCapability_Service_{ + Service: &csipbv1.PluginCapability_Service{ + Type: csipbv1.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, + ExpectedResponse: &PluginCapabilitySet{hasControllerService: true}, + }, + { + Name: "HasTopologies is true when it's part of the response", + Response: &csipbv1.GetPluginCapabilitiesResponse{ + Capabilities: []*csipbv1.PluginCapability{ + { + Type: &csipbv1.PluginCapability_Service_{ + Service: &csipbv1.PluginCapability_Service{ + Type: csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS, + }, + }, + }, + }, + }, + ExpectedResponse: &PluginCapabilitySet{hasTopologies: true}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, _, _, client := newTestClient() + defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginCapabilities = c.Response + + resp, err := client.PluginGetCapabilities(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.ControllerGetCapabilitiesResponse + ExpectedResponse *ControllerCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "ignores unknown capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{}, + }, + { + Name: "detects list volumes capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{ + HasListVolumes: true, + HasListVolumesPublishedNodes: true, + }, + }, + { + Name: "detects publish capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY, + }, + }, + }, + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: 
&csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{ + HasPublishUnpublishVolume: true, + HasPublishReadonly: true, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, _, client := newTestClient() + defer client.Close() + + cc.NextErr = tc.ResponseErr + cc.NextCapabilitiesResponse = tc.Response + + resp, err := client.ControllerGetCapabilities(context.TODO()) + if tc.ExpectedErr != nil { + require.Error(t, tc.ExpectedErr, err) + } + + require.Equal(t, tc.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_NodeGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeGetCapabilitiesResponse + ExpectedResponse *NodeCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "ignores unknown capabilities", + Response: &csipbv1.NodeGetCapabilitiesResponse{ + Capabilities: []*csipbv1.NodeServiceCapability{ + { + Type: &csipbv1.NodeServiceCapability_Rpc{ + Rpc: &csipbv1.NodeServiceCapability_RPC{ + Type: csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &NodeCapabilitySet{}, + }, + { + Name: "detects stage volumes capability", + Response: &csipbv1.NodeGetCapabilitiesResponse{ + Capabilities: []*csipbv1.NodeServiceCapability{ + { + Type: &csipbv1.NodeServiceCapability_Rpc{ + Rpc: &csipbv1.NodeServiceCapability_RPC{ + Type: csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &NodeCapabilitySet{ + HasStageUnstageVolume: true, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextCapabilitiesResponse = tc.Response + + resp, err := client.NodeGetCapabilities(context.TODO()) + if tc.ExpectedErr != nil { + require.Error(t, tc.ExpectedErr, err) + } + + require.Equal(t, tc.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerPublishVolume(t *testing.T) { + cases := []struct { + Name string + Request *ControllerPublishVolumeRequest + ResponseErr error + Response *csipbv1.ControllerPublishVolumeResponse + ExpectedResponse *ControllerPublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &ControllerPublishVolumeRequest{}, + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "Handles missing NodeID", + Request: &ControllerPublishVolumeRequest{}, + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedErr: fmt.Errorf("missing NodeID"), + }, + + { + Name: "Handles PublishContext == nil", + Request: &ControllerPublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedResponse: &ControllerPublishVolumeResponse{}, + }, + { + Name: "Handles PublishContext != nil", + Request: &ControllerPublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, + Response: &csipbv1.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": "foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + ExpectedResponse: &ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": 
"foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, cc, _, client := newTestClient() + defer client.Close() + + cc.NextErr = c.ResponseErr + cc.NextPublishVolumeResponse = c.Response + + resp, err := client.ControllerPublishVolume(context.TODO(), c.Request) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { + cases := []struct { + Name string + Request *ControllerUnpublishVolumeRequest + ResponseErr error + Response *csipbv1.ControllerUnpublishVolumeResponse + ExpectedResponse *ControllerUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "Handles underlying grpc errors", + Request: &ControllerUnpublishVolumeRequest{}, + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "Handles missing NodeID", + Request: &ControllerUnpublishVolumeRequest{}, + ExpectedErr: fmt.Errorf("missing NodeID"), + ExpectedResponse: nil, + }, + { + Name: "Handles successful response", + Request: &ControllerUnpublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, + ExpectedErr: fmt.Errorf("missing NodeID"), + ExpectedResponse: &ControllerUnpublishVolumeResponse{}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, cc, _, client := newTestClient() + defer client.Close() + + cc.NextErr = c.ResponseErr + cc.NextUnpublishVolumeResponse = c.Response + + resp, err := client.ControllerUnpublishVolume(context.TODO(), c.Request) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_NodeStageVolume(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeStageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextStageVolumeResponse = c.Response + + err := client.NodeStageVolume(context.TODO(), "foo", nil, "/foo", &VolumeCapability{}) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} + +func TestClient_RPC_NodeUnstageVolume(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeUnstageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextUnstageVolumeResponse = c.Response + + err := client.NodeUnstageVolume(context.TODO(), "foo", "/foo") + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} + +func TestClient_RPC_NodePublishVolume(t *testing.T) { + cases := []struct { + Name string + Request *NodePublishVolumeRequest + ResponseErr error + Response 
*csipbv1.NodePublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &NodePublishVolumeRequest{ + VolumeID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + Request: &NodePublishVolumeRequest{ + VolumeID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the publish volume request", + Request: &NodePublishVolumeRequest{ + VolumeID: "", + }, + ResponseErr: nil, + ExpectedErr: errors.New("missing VolumeID"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextPublishVolumeResponse = c.Response + + err := client.NodePublishVolume(context.TODO(), c.Request) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} +func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { + cases := []struct { + Name string + VolumeID string + TargetPath string + ResponseErr error + Response *csipbv1.NodeUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + VolumeID: "foo", + TargetPath: "/dev/null", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + VolumeID: "foo", + TargetPath: "/dev/null", + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the request args - VolumeID", + ResponseErr: nil, + ExpectedErr: errors.New("missing VolumeID"), + }, + { + Name: "Performs validation of the request args - TargetPath", + VolumeID: "foo", + ResponseErr: nil, + ExpectedErr: errors.New("missing TargetPath"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextUnpublishVolumeResponse = c.Response + + err := client.NodeUnpublishVolume(context.TODO(), c.VolumeID, c.TargetPath) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go new file mode 100644 index 000000000..b971ce260 --- /dev/null +++ b/plugins/csi/fake/client.go @@ -0,0 +1,278 @@ +// fake is a package that includes fake implementations of public interfaces +// from the CSI package for testing. 
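+// +// A minimal usage sketch, assuming the stretchr/testify assertion style used +// elsewhere in this change: +// +// c := &fake.Client{NextPluginProbeResponse: true} +// ready, err := c.PluginProbe(context.TODO()) +// require.NoError(t, err) +// require.True(t, ready) +// require.Equal(t, int64(1), c.PluginProbeCallCount) +//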
+package fake + +import ( + "context" + "errors" + "fmt" + "sync" + + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + "google.golang.org/grpc" +) + +var _ csi.CSIPlugin = &Client{} + +// Client is a mock implementation of the csi.CSIPlugin interface for use in testing +// external components +type Client struct { + Mu sync.RWMutex + + NextPluginInfoResponse *base.PluginInfoResponse + NextPluginInfoErr error + PluginInfoCallCount int64 + + NextPluginProbeResponse bool + NextPluginProbeErr error + PluginProbeCallCount int64 + + NextPluginGetInfoNameResponse string + NextPluginGetInfoVersionResponse string + NextPluginGetInfoErr error + PluginGetInfoCallCount int64 + + NextPluginGetCapabilitiesResponse *csi.PluginCapabilitySet + NextPluginGetCapabilitiesErr error + PluginGetCapabilitiesCallCount int64 + + NextControllerGetCapabilitiesResponse *csi.ControllerCapabilitySet + NextControllerGetCapabilitiesErr error + ControllerGetCapabilitiesCallCount int64 + + NextControllerPublishVolumeResponse *csi.ControllerPublishVolumeResponse + NextControllerPublishVolumeErr error + ControllerPublishVolumeCallCount int64 + + NextControllerUnpublishVolumeResponse *csi.ControllerUnpublishVolumeResponse + NextControllerUnpublishVolumeErr error + ControllerUnpublishVolumeCallCount int64 + + NextControllerValidateVolumeErr error + ControllerValidateVolumeCallCount int64 + + NextNodeGetCapabilitiesResponse *csi.NodeCapabilitySet + NextNodeGetCapabilitiesErr error + NodeGetCapabilitiesCallCount int64 + + NextNodeGetInfoResponse *csi.NodeGetInfoResponse + NextNodeGetInfoErr error + NodeGetInfoCallCount int64 + + NextNodeStageVolumeErr error + NodeStageVolumeCallCount int64 + + NextNodeUnstageVolumeErr error + NodeUnstageVolumeCallCount int64 + + PrevVolumeCapability *csi.VolumeCapability + NextNodePublishVolumeErr error + NodePublishVolumeCallCount int64 + + NextNodeUnpublishVolumeErr error + NodeUnpublishVolumeCallCount int64 +} + +// PluginInfo describes the type and version of a plugin. +func (c *Client) PluginInfo() (*base.PluginInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginInfoCallCount++ + + return c.NextPluginInfoResponse, c.NextPluginInfoErr +} + +// ConfigSchema returns the schema for parsing the plugins configuration. +func (c *Client) ConfigSchema() (*hclspec.Spec, error) { + return nil, errors.New("Unsupported") +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. +func (c *Client) SetConfig(a *base.Config) error { + return errors.New("Unsupported") +} + +// PluginProbe is used to verify that the plugin is in a healthy state +func (c *Client) PluginProbe(ctx context.Context) (bool, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginProbeCallCount++ + + return c.NextPluginProbeResponse, c.NextPluginProbeErr +} + +// PluginGetInfo is used to return semantic data about the plugin. +// Response: +// - string: name, the name of the plugin in domain notation format. +func (c *Client) PluginGetInfo(ctx context.Context) (string, string, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetInfoCallCount++ + + return c.NextPluginGetInfoNameResponse, c.NextPluginGetInfoVersionResponse, c.NextPluginGetInfoErr +} + +// PluginGetCapabilities is used to return the available capabilities from the +// identity service. 
This currently only looks for the CONTROLLER_SERVICE and +// Accessible Topology Support +func (c *Client) PluginGetCapabilities(ctx context.Context) (*csi.PluginCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetCapabilitiesCallCount++ + + return c.NextPluginGetCapabilitiesResponse, c.NextPluginGetCapabilitiesErr +} + +func (c *Client) ControllerGetCapabilities(ctx context.Context) (*csi.ControllerCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerGetCapabilitiesCallCount++ + + return c.NextControllerGetCapabilitiesResponse, c.NextControllerGetCapabilitiesErr +} + +// ControllerPublishVolume is used to attach a remote volume to a node +func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerPublishVolumeCallCount++ + + return c.NextControllerPublishVolumeResponse, c.NextControllerPublishVolumeErr +} + +// ControllerUnpublishVolume is used to detach a remote volume from a node +func (c *Client) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerUnpublishVolumeCallCount++ + + return c.NextControllerUnpublishVolumeResponse, c.NextControllerUnpublishVolumeErr +} + +func (c *Client) ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *csi.VolumeCapability) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerValidateVolumeCallCount++ + + return c.NextControllerValidateVolumeErr +} + +func (c *Client) NodeGetCapabilities(ctx context.Context) (*csi.NodeCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeGetCapabilitiesCallCount++ + + return c.NextNodeGetCapabilitiesResponse, c.NextNodeGetCapabilitiesErr +} + +// NodeGetInfo is used to return semantic data about the current node with +// respect to the SP. +func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeGetInfoCallCount++ + + return c.NextNodeGetInfoResponse, c.NextNodeGetInfoErr +} + +// NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to prepare a volume for usage on a host. If err == nil, the response should +// be assumed to be successful. +func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *csi.VolumeCapability, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeStageVolumeCallCount++ + + return c.NextNodeStageVolumeErr +} + +// NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to undo the work performed by NodeStageVolume. If a volume has been staged, +// this RPC must be called before freeing the volume. +// +// If err == nil, the response should be assumed to be successful.
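+// +// A hedged test sketch (the error value and assertions are illustrative): +// +// c.NextNodeUnstageVolumeErr = errors.New("boom") +// err := c.NodeUnstageVolume(ctx, "vol-id", "/staging/vol-id") +// // err is the injected error; NodeUnstageVolumeCallCount has incremented +//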
+func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnstageVolumeCallCount++ + + return c.NextNodeUnstageVolumeErr +} + +func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PrevVolumeCapability = req.VolumeCapability + c.NodePublishVolumeCallCount++ + + return c.NextNodePublishVolumeErr +} + +func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnpublishVolumeCallCount++ + + return c.NextNodeUnpublishVolumeErr +} + +// Shutdown the client and ensure any connections are cleaned up. +func (c *Client) Close() error { + + c.NextPluginInfoResponse = nil + c.NextPluginInfoErr = fmt.Errorf("closed client") + + c.NextPluginProbeResponse = false + c.NextPluginProbeErr = fmt.Errorf("closed client") + + c.NextPluginGetInfoNameResponse = "" + c.NextPluginGetInfoVersionResponse = "" + c.NextPluginGetInfoErr = fmt.Errorf("closed client") + + c.NextPluginGetCapabilitiesResponse = nil + c.NextPluginGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerGetCapabilitiesResponse = nil + c.NextControllerGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerPublishVolumeResponse = nil + c.NextControllerPublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerUnpublishVolumeResponse = nil + c.NextControllerUnpublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerValidateVolumeErr = fmt.Errorf("closed client") + + c.NextNodeGetCapabilitiesResponse = nil + c.NextNodeGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextNodeGetInfoResponse = nil + c.NextNodeGetInfoErr = fmt.Errorf("closed client") + + c.NextNodeStageVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnstageVolumeErr = fmt.Errorf("closed client") + + c.NextNodePublishVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnpublishVolumeErr = fmt.Errorf("closed client") + + return nil +} diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go new file mode 100644 index 000000000..345b9f753 --- /dev/null +++ b/plugins/csi/plugin.go @@ -0,0 +1,433 @@ +package csi + +import ( + "context" + "errors" + "fmt" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/base" + "google.golang.org/grpc" +) + +// CSIPlugin implements a lightweight abstraction layer around a CSI Plugin. +// It validates that responses from storage providers (SPs) correctly conform +// to the specification before returning response data or erroring. +type CSIPlugin interface { + base.BasePlugin + + // PluginProbe is used to verify that the plugin is in a healthy state + PluginProbe(ctx context.Context) (bool, error) + + // PluginGetInfo is used to return semantic data about the plugin. + // Response: + // - string: name, the name of the plugin in domain notation format. + // - string: version, the vendor version of the plugin + PluginGetInfo(ctx context.Context) (string, string, error) + + // PluginGetCapabilities is used to return the available capabilities from the + // identity service.
+	PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error)
+
+	// ControllerGetCapabilities is used to get controller-specific capabilities
+	// for a plugin.
+	ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error)
+
+	// ControllerPublishVolume is used to attach a remote volume to a cluster node.
+	ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error)
+
+	// ControllerUnpublishVolume is used to detach a remote volume from a cluster node.
+	ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error)
+
+	// ControllerValidateCapabilities is used to validate that a volume exists and
+	// supports the requested capability.
+	ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *VolumeCapability) error
+
+	// NodeGetCapabilities is used to return the available capabilities from the
+	// Node Service.
+	NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error)
+
+	// NodeGetInfo is used to return semantic data about the current node with
+	// respect to the SP.
+	NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error)
+
+	// NodeStageVolume is used when a plugin has the STAGE_UNSTAGE_VOLUME capability
+	// to prepare a volume for usage on a host. If err == nil, the response should
+	// be assumed to be successful.
+	NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability, opts ...grpc.CallOption) error
+
+	// NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE_VOLUME capability
+	// to undo the work performed by NodeStageVolume. If a volume has been staged,
+	// this RPC must be called before freeing the volume.
+	//
+	// If err == nil, the response should be assumed to be successful.
+	NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error
+
+	// NodePublishVolume is used to prepare a volume for use by an allocation.
+	// If err == nil, the response should be assumed to be successful.
+	NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error
+
+	// NodeUnpublishVolume is used to clean up usage of a volume for an alloc. This
+	// MUST be called before calling NodeUnstageVolume or ControllerUnpublishVolume
+	// for the given volume.
+	NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error
+
+	// Close shuts down the client and ensures any connections are cleaned up.
+	Close() error
+}
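Before the request and response types, a rough sketch of the intended call order may help: probe for liveness, fetch identity and capability data, and only touch the controller RPCs when CONTROLLER_SERVICE is advertised. The helper below is hypothetical, not part of this change:

```go
// fingerprintOnce is a hypothetical helper illustrating how a consumer might
// drive any CSIPlugin implementation during fingerprinting.
func fingerprintOnce(ctx context.Context, plugin CSIPlugin) error {
	healthy, err := plugin.PluginProbe(ctx)
	if err != nil {
		return err
	}
	if !healthy {
		return fmt.Errorf("plugin is not healthy")
	}

	name, version, err := plugin.PluginGetInfo(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("fingerprinting %s (version %s)\n", name, version)

	caps, err := plugin.PluginGetCapabilities(ctx)
	if err != nil {
		return err
	}

	// Controller RPCs are only valid for plugins that advertise
	// CONTROLLER_SERVICE in their identity capabilities.
	if caps.HasControllerService() {
		if _, err := plugin.ControllerGetCapabilities(ctx); err != nil {
			return err
		}
	}
	return nil
}
```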
+
+type NodePublishVolumeRequest struct {
+	// The ID of the volume to publish.
+	VolumeID string
+
+	// If the volume was attached via a call to `ControllerPublishVolume` then
+	// we need to provide the returned PublishContext here.
+	PublishContext map[string]string
+
+	// The path to which the volume was staged by `NodeStageVolume`.
+	// It MUST be an absolute path in the root filesystem of the process
+	// serving this request.
+	// E.g. {the plugin's internal mount path}/staging/volumeid/...
+	//
+	// It MUST be set if the Node Plugin implements the
+	// `STAGE_UNSTAGE_VOLUME` node capability.
+	StagingTargetPath string
+
+	// The path to which the volume will be published.
+	// It MUST be an absolute path in the root filesystem of the process serving this
+	// request.
+	// E.g. {the plugin's internal mount path}/per-alloc/allocid/volumeid/...
+	//
+	// The CO SHALL ensure uniqueness of target_path per volume.
+	// The CO SHALL ensure that the parent directory of this path exists
+	// and that the process serving the request has `read` and `write`
+	// permissions to that parent directory.
+	TargetPath string
+
+	// Volume capability describing how the CO intends to use this volume.
+	VolumeCapability *VolumeCapability
+
+	Readonly bool
+
+	// Reserved for future use.
+	Secrets map[string]string
+}
+
+func (r *NodePublishVolumeRequest) ToCSIRepresentation() *csipbv1.NodePublishVolumeRequest {
+	if r == nil {
+		return nil
+	}
+
+	return &csipbv1.NodePublishVolumeRequest{
+		VolumeId:          r.VolumeID,
+		PublishContext:    r.PublishContext,
+		StagingTargetPath: r.StagingTargetPath,
+		TargetPath:        r.TargetPath,
+		VolumeCapability:  r.VolumeCapability.ToCSIRepresentation(),
+		Readonly:          r.Readonly,
+		Secrets:           r.Secrets,
+	}
+}
+
+func (r *NodePublishVolumeRequest) Validate() error {
+	if r.VolumeID == "" {
+		return errors.New("missing VolumeID")
+	}
+
+	if r.TargetPath == "" {
+		return errors.New("missing TargetPath")
+	}
+
+	if r.VolumeCapability == nil {
+		return errors.New("missing VolumeCapability")
+	}
+
+	return nil
+}
+
+type PluginCapabilitySet struct {
+	hasControllerService bool
+	hasTopologies        bool
+}
+
+func (p *PluginCapabilitySet) HasControllerService() bool {
+	return p.hasControllerService
+}
+
+// HasToplogies indicates whether the volumes for this plugin may not be
+// equally accessible by all nodes in the cluster. If true, we MUST use the
+// topology information when scheduling workloads.
+func (p *PluginCapabilitySet) HasToplogies() bool {
+	return p.hasTopologies
+}
+
+func (p *PluginCapabilitySet) IsEqual(o *PluginCapabilitySet) bool {
+	return p.hasControllerService == o.hasControllerService && p.hasTopologies == o.hasTopologies
+}
+
+func NewTestPluginCapabilitySet(topologies, controller bool) *PluginCapabilitySet {
+	return &PluginCapabilitySet{
+		hasTopologies:        topologies,
+		hasControllerService: controller,
+	}
+}
+
+func NewPluginCapabilitySet(capabilities *csipbv1.GetPluginCapabilitiesResponse) *PluginCapabilitySet {
+	cs := &PluginCapabilitySet{}
+
+	pluginCapabilities := capabilities.GetCapabilities()
+
+	for _, pcap := range pluginCapabilities {
+		if svcCap := pcap.GetService(); svcCap != nil {
+			switch svcCap.Type {
+			case csipbv1.PluginCapability_Service_UNKNOWN:
+				continue
+			case csipbv1.PluginCapability_Service_CONTROLLER_SERVICE:
+				cs.hasControllerService = true
+			case csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS:
+				cs.hasTopologies = true
+			default:
+				continue
+			}
+		}
+	}
+
+	return cs
+}
+
+type ControllerCapabilitySet struct {
+	HasPublishUnpublishVolume    bool
+	HasPublishReadonly           bool
+	HasListVolumes               bool
+	HasListVolumesPublishedNodes bool
+}
+
+func NewControllerCapabilitySet(resp *csipbv1.ControllerGetCapabilitiesResponse) *ControllerCapabilitySet {
+	cs := &ControllerCapabilitySet{}
+
+	pluginCapabilities := resp.GetCapabilities()
+	for _, pcap := range pluginCapabilities {
+		if c := pcap.GetRpc(); c != nil {
+			switch c.Type {
+			case csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME:
+				cs.HasPublishUnpublishVolume = true
+			case csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY:
+				cs.HasPublishReadonly = true
+			case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES:
+				cs.HasListVolumes =
true + case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES: + cs.HasListVolumesPublishedNodes = true + default: + continue + } + } + } + + return cs +} + +type ControllerPublishVolumeRequest struct { + VolumeID string + NodeID string + ReadOnly bool + VolumeCapability *VolumeCapability +} + +func (r *ControllerPublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerPublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.ControllerPublishVolumeRequest{ + VolumeId: r.VolumeID, + NodeId: r.NodeID, + Readonly: r.ReadOnly, + VolumeCapability: r.VolumeCapability.ToCSIRepresentation(), + } +} + +func (r *ControllerPublishVolumeRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + if r.NodeID == "" { + return errors.New("missing NodeID") + } + return nil +} + +type ControllerPublishVolumeResponse struct { + PublishContext map[string]string +} + +type ControllerUnpublishVolumeRequest struct { + VolumeID string + NodeID string +} + +func (r *ControllerUnpublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerUnpublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.ControllerUnpublishVolumeRequest{ + VolumeId: r.VolumeID, + NodeId: r.NodeID, + } +} + +func (r *ControllerUnpublishVolumeRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + if r.NodeID == "" { + // the spec allows this but it would unpublish the + // volume from all nodes + return errors.New("missing NodeID") + } + return nil +} + +type ControllerUnpublishVolumeResponse struct{} + +type NodeCapabilitySet struct { + HasStageUnstageVolume bool +} + +func NewNodeCapabilitySet(resp *csipbv1.NodeGetCapabilitiesResponse) *NodeCapabilitySet { + cs := &NodeCapabilitySet{} + pluginCapabilities := resp.GetCapabilities() + for _, pcap := range pluginCapabilities { + if c := pcap.GetRpc(); c != nil { + switch c.Type { + case csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + cs.HasStageUnstageVolume = true + default: + continue + } + } + } + + return cs +} + +// VolumeAccessMode represents the desired access mode of the CSI Volume +type VolumeAccessMode csipbv1.VolumeCapability_AccessMode_Mode + +var _ fmt.Stringer = VolumeAccessModeUnknown + +var ( + VolumeAccessModeUnknown = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_UNKNOWN) + VolumeAccessModeSingleNodeWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER) + VolumeAccessModeSingleNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY) + VolumeAccessModeMultiNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY) + VolumeAccessModeMultiNodeSingleWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER) + VolumeAccessModeMultiNodeMultiWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER) +) + +func (a VolumeAccessMode) String() string { + return a.ToCSIRepresentation().String() +} + +func (a VolumeAccessMode) ToCSIRepresentation() csipbv1.VolumeCapability_AccessMode_Mode { + return csipbv1.VolumeCapability_AccessMode_Mode(a) +} + +// VolumeAccessType represents the filesystem apis that the user intends to use +// with the volume. E.g whether it will be used as a block device or if they wish +// to have a mounted filesystem. 
+type VolumeAccessType int32 + +var _ fmt.Stringer = VolumeAccessTypeBlock + +var ( + VolumeAccessTypeBlock VolumeAccessType = 1 + VolumeAccessTypeMount VolumeAccessType = 2 +) + +func (v VolumeAccessType) String() string { + if v == VolumeAccessTypeBlock { + return "VolumeAccessType.Block" + } else if v == VolumeAccessTypeMount { + return "VolumeAccessType.Mount" + } else { + return "VolumeAccessType.Unspecified" + } +} + +// VolumeCapability describes the overall usage requirements for a given CSI Volume +type VolumeCapability struct { + AccessType VolumeAccessType + AccessMode VolumeAccessMode + + // Indicate that the volume will be accessed via the filesystem API. + MountVolume *structs.CSIMountOptions +} + +func VolumeCapabilityFromStructs(sAccessType structs.CSIVolumeAttachmentMode, sAccessMode structs.CSIVolumeAccessMode) (*VolumeCapability, error) { + var accessType VolumeAccessType + switch sAccessType { + case structs.CSIVolumeAttachmentModeBlockDevice: + accessType = VolumeAccessTypeBlock + case structs.CSIVolumeAttachmentModeFilesystem: + accessType = VolumeAccessTypeMount + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. + return nil, fmt.Errorf("Unknown volume attachment mode: %s", sAccessType) + } + + var accessMode VolumeAccessMode + switch sAccessMode { + case structs.CSIVolumeAccessModeSingleNodeReader: + accessMode = VolumeAccessModeSingleNodeReaderOnly + case structs.CSIVolumeAccessModeSingleNodeWriter: + accessMode = VolumeAccessModeSingleNodeWriter + case structs.CSIVolumeAccessModeMultiNodeMultiWriter: + accessMode = VolumeAccessModeMultiNodeMultiWriter + case structs.CSIVolumeAccessModeMultiNodeSingleWriter: + accessMode = VolumeAccessModeMultiNodeSingleWriter + case structs.CSIVolumeAccessModeMultiNodeReader: + accessMode = VolumeAccessModeMultiNodeReaderOnly + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. 
+ return nil, fmt.Errorf("Unknown volume access mode: %v", sAccessMode) + } + + return &VolumeCapability{ + AccessType: accessType, + AccessMode: accessMode, + }, nil +} + +func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { + if c == nil { + return nil + } + + vc := &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: c.AccessMode.ToCSIRepresentation(), + }, + } + + if c.AccessType == VolumeAccessTypeMount { + opts := &csipbv1.VolumeCapability_MountVolume{} + if c.MountVolume != nil { + opts.FsType = c.MountVolume.FSType + opts.MountFlags = c.MountVolume.MountFlags + } + vc.AccessType = &csipbv1.VolumeCapability_Mount{Mount: opts} + } else { + vc.AccessType = &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}} + } + + return vc +} diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go new file mode 100644 index 000000000..f28d9287e --- /dev/null +++ b/plugins/csi/testing/client.go @@ -0,0 +1,129 @@ +package testing + +import ( + "context" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" +) + +// IdentityClient is a CSI identity client used for testing +type IdentityClient struct { + NextErr error + NextPluginInfo *csipbv1.GetPluginInfoResponse + NextPluginCapabilities *csipbv1.GetPluginCapabilitiesResponse + NextPluginProbe *csipbv1.ProbeResponse +} + +// NewIdentityClient returns a new IdentityClient +func NewIdentityClient() *IdentityClient { + return &IdentityClient{} +} + +func (f *IdentityClient) Reset() { + f.NextErr = nil + f.NextPluginInfo = nil + f.NextPluginCapabilities = nil + f.NextPluginProbe = nil +} + +// GetPluginInfo returns plugin info +func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipbv1.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginInfoResponse, error) { + return f.NextPluginInfo, f.NextErr +} + +// GetPluginCapabilities implements csi method +func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipbv1.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginCapabilitiesResponse, error) { + return f.NextPluginCapabilities, f.NextErr +} + +// Probe implements csi method +func (f *IdentityClient) Probe(ctx context.Context, in *csipbv1.ProbeRequest, opts ...grpc.CallOption) (*csipbv1.ProbeResponse, error) { + return f.NextPluginProbe, f.NextErr +} + +// ControllerClient is a CSI controller client used for testing +type ControllerClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.ControllerGetCapabilitiesResponse + NextPublishVolumeResponse *csipbv1.ControllerPublishVolumeResponse + NextUnpublishVolumeResponse *csipbv1.ControllerUnpublishVolumeResponse +} + +// NewControllerClient returns a new ControllerClient +func NewControllerClient() *ControllerClient { + return &ControllerClient{} +} + +func (f *ControllerClient) Reset() { + f.NextErr = nil + f.NextCapabilitiesResponse = nil + f.NextPublishVolumeResponse = nil + f.NextUnpublishVolumeResponse = nil +} + +func (c *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) { + return 
c.NextPublishVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) { + panic("not implemented") // TODO: Implement +} + +// NodeClient is a CSI Node client used for testing +type NodeClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse + NextGetInfoResponse *csipbv1.NodeGetInfoResponse + NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse + NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse + NextPublishVolumeResponse *csipbv1.NodePublishVolumeResponse + NextUnpublishVolumeResponse *csipbv1.NodeUnpublishVolumeResponse +} + +// NewNodeClient returns a new stub NodeClient +func NewNodeClient() *NodeClient { + return &NodeClient{} +} + +func (f *NodeClient) Reset() { + f.NextErr = nil + f.NextCapabilitiesResponse = nil + f.NextGetInfoResponse = nil + f.NextStageVolumeResponse = nil + f.NextUnstageVolumeResponse = nil + f.NextPublishVolumeResponse = nil + f.NextUnpublishVolumeResponse = nil +} + +func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *NodeClient) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) { + return c.NextGetInfoResponse, c.NextErr +} + +func (c *NodeClient) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) { + return c.NextStageVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) { + return c.NextUnstageVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) { + return c.NextPublishVolumeResponse, c.NextErr +} + +func (c *NodeClient) NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, c.NextErr +} diff --git a/scheduler/feasible.go b/scheduler/feasible.go index fbe184bb3..35ce4f57c 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -7,12 +7,24 @@ import ( "strconv" "strings" + memdb "github.com/hashicorp/go-memdb" version "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/helper/constraints/semver" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) +const ( + FilterConstraintHostVolumes = "missing compatible host volumes" + FilterConstraintCSIPlugins = "missing CSI plugins" + FilterConstraintCSIVolumesLookupFailed = "CSI volume lookup failed" + FilterConstraintCSIVolumeNotFoundTemplate = "missing CSI Volume %s" + FilterConstraintCSIVolumeNoReadTemplate = "CSI volume %s has exhausted its available reader claims" + 
FilterConstraintCSIVolumeNoWriteTemplate = "CSI volume %s has exhausted its available writer claims or is read-only" + FilterConstraintDrivers = "missing drivers" + FilterConstraintDevices = "missing devices" +) + // FeasibleIterator is used to iteratively yield nodes that // match feasibility constraints. The iterators may manage // some state for performance optimizations. @@ -61,14 +73,14 @@ func (iter *StaticIterator) Next() *structs.Node { // Check if exhausted n := len(iter.nodes) if iter.offset == n || iter.seen == n { - if iter.seen != n { + if iter.seen != n { // seen has been Reset() to 0 iter.offset = 0 } else { return nil } } - // Return the next offset + // Return the next offset, use this one offset := iter.offset iter.offset += 1 iter.seen += 1 @@ -117,7 +129,6 @@ func NewHostVolumeChecker(ctx Context) *HostVolumeChecker { // SetVolumes takes the volumes required by a task group and updates the checker. func (h *HostVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { lookupMap := make(map[string][]*structs.VolumeRequest) - // Convert the map from map[DesiredName]Request to map[Source][]Request to improve // lookup performance. Also filter non-host volumes. for _, req := range volumes { @@ -135,7 +146,7 @@ func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool { return true } - h.ctx.Metrics().FilterNode(candidate, "missing compatible host volumes") + h.ctx.Metrics().FilterNode(candidate, FilterConstraintHostVolumes) return false } @@ -177,6 +188,86 @@ func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool { return true } +type CSIVolumeChecker struct { + ctx Context + namespace string + volumes map[string]*structs.VolumeRequest +} + +func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker { + return &CSIVolumeChecker{ + ctx: ctx, + } +} + +func (c *CSIVolumeChecker) SetNamespace(namespace string) { + c.namespace = namespace +} + +func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { + xs := make(map[string]*structs.VolumeRequest) + // Filter to only CSI Volumes + for alias, req := range volumes { + if req.Type != structs.VolumeTypeCSI { + continue + } + + xs[alias] = req + } + c.volumes = xs +} + +func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool { + hasPlugins, failReason := c.hasPlugins(n) + + if hasPlugins { + return true + } + + c.ctx.Metrics().FilterNode(n, failReason) + return false +} + +func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) (bool, string) { + // We can mount the volume if + // - if required, a healthy controller plugin is running the driver + // - the volume has free claims + // - this node is running the node plugin, implies matching topology + + // Fast path: Requested no volumes. No need to check further. 
+	if len(c.volumes) == 0 {
+		return true, ""
+	}
+
+	ws := memdb.NewWatchSet()
+	for _, req := range c.volumes {
+		// Get the volume to check that it's healthy (there's a healthy controller
+		// and the volume hasn't encountered an error or been marked for GC)
+		vol, err := c.ctx.State().CSIVolumeByID(ws, c.namespace, req.Source)
+		if err != nil {
+			return false, FilterConstraintCSIVolumesLookupFailed
+		}
+		if vol == nil {
+			return false, fmt.Sprintf(FilterConstraintCSIVolumeNotFoundTemplate, req.Source)
+		}
+
+		// Check that this node has a healthy running plugin with the right PluginID
+		plugin, ok := n.CSINodePlugins[vol.PluginID]
+		if !(ok && plugin.Healthy) {
+			return false, FilterConstraintCSIPlugins
+		}
+		if req.ReadOnly {
+			if !vol.CanReadOnly() {
+				return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID)
+			}
+		} else if !vol.CanWrite() {
+			return false, fmt.Sprintf(FilterConstraintCSIVolumeNoWriteTemplate, vol.ID)
+		}
+	}
+
+	return true, ""
+}
+
 // DriverChecker is a FeasibilityChecker which returns whether a node has the
 // drivers necessary to scheduler a task group.
 type DriverChecker struct {
@@ -201,7 +292,7 @@ func (c *DriverChecker) Feasible(option *structs.Node) bool {
 	if c.hasDrivers(option) {
 		return true
 	}
-	c.ctx.Metrics().FilterNode(option, "missing drivers")
+	c.ctx.Metrics().FilterNode(option, FilterConstraintDrivers)
 	return false
 }
@@ -780,18 +871,20 @@ type FeasibilityWrapper struct {
 	source      FeasibleIterator
 	jobCheckers []FeasibilityChecker
 	tgCheckers  []FeasibilityChecker
+	tgAvailable []FeasibilityChecker
 	tg          string
 }

 // NewFeasibilityWrapper returns a FeasibleIterator based on the passed source
 // and FeasibilityCheckers.
 func NewFeasibilityWrapper(ctx Context, source FeasibleIterator,
-	jobCheckers, tgCheckers []FeasibilityChecker) *FeasibilityWrapper {
+	jobCheckers, tgCheckers, tgAvailable []FeasibilityChecker) *FeasibilityWrapper {
 	return &FeasibilityWrapper{
 		ctx:         ctx,
 		source:      source,
 		jobCheckers: jobCheckers,
 		tgCheckers:  tgCheckers,
+		tgAvailable: tgAvailable,
 	}
 }
@@ -858,7 +951,12 @@ OUTER:
 			continue
 		case EvalComputedClassEligible:
 			// Fast path the eligible case
-			return option
+			if w.available(option) {
+				return option
+			}
+			// We match the class but are temporarily unavailable; the eval
+			// should be blocked
+			return nil
 		case EvalComputedClassEscaped:
 			tgEscaped = true
 		case EvalComputedClassUnknown:
@@ -884,10 +982,32 @@ OUTER:
 			evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass)
 		}

+		// tgAvailable handlers are available transiently, so we test them without
+		// affecting the computed class
+		if !w.available(option) {
+			continue OUTER
+		}
+
 		return option
 	}
 }

+// available checks transient feasibility checkers which depend on changing
+// conditions, e.g. the health status of a plugin or driver
+func (w *FeasibilityWrapper) available(option *structs.Node) bool {
+	// If we don't have any availability checks, we're available
+	if len(w.tgAvailable) == 0 {
+		return true
+	}
+
+	for _, check := range w.tgAvailable {
+		if !check.Feasible(option) {
+			return false
+		}
+	}
+	return true
+}
+
 // DeviceChecker is a FeasibilityChecker which returns whether a node has the
 // devices necessary to scheduler a task group.
type DeviceChecker struct { @@ -920,7 +1040,7 @@ func (c *DeviceChecker) Feasible(option *structs.Node) bool { return true } - c.ctx.Metrics().FilterNode(option, "missing devices") + c.ctx.Metrics().FilterNode(option, FilterConstraintDevices) return false } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 62b39acf8..c7eac930e 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -223,6 +223,127 @@ func TestHostVolumeChecker_ReadOnly(t *testing.T) { Result: true, }, } + for i, c := range cases { + checker.SetVolumes(c.RequestedVolumes) + if act := checker.Feasible(c.Node); act != c.Result { + t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) + } + } +} + +func TestCSIVolumeChecker(t *testing.T) { + t.Parallel() + state, ctx := testContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + mock.Node(), + mock.Node(), + } + + // Register running plugins on some nodes + nodes[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + }, + } + nodes[0].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + nodes[1].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: false, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + nodes[2].CSINodePlugins = map[string]*structs.CSIInfo{ + "bar": { + PluginID: "bar", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + + // Create the plugins in the state store + index := uint64(999) + for _, node := range nodes { + err := state.UpsertNode(index, node) + require.NoError(t, err) + index++ + } + + // Create the volume in the state store + vid := "volume-id" + vol := structs.NewCSIVolume(vid, index) + vol.PluginID = "foo" + vol.Namespace = structs.DefaultNamespace + vol.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + vol.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + err := state.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + require.NoError(t, err) + + // Create volume requests + noVolumes := map[string]*structs.VolumeRequest{} + + volumes := map[string]*structs.VolumeRequest{ + "baz": { + Type: "csi", + Name: "baz", + Source: "volume-id", + }, + "nonsense": { + Type: "host", + Name: "nonsense", + Source: "my-host-volume", + }, + } + + checker := NewCSIVolumeChecker(ctx) + checker.SetNamespace(structs.DefaultNamespace) + + cases := []struct { + Node *structs.Node + RequestedVolumes map[string]*structs.VolumeRequest + Result bool + }{ + { // Get it + Node: nodes[0], + RequestedVolumes: volumes, + Result: true, + }, + { // Unhealthy + Node: nodes[1], + RequestedVolumes: volumes, + Result: false, + }, + { // Wrong id + Node: nodes[2], + RequestedVolumes: volumes, + Result: false, + }, + { // No Volumes requested or available + Node: nodes[3], + RequestedVolumes: noVolumes, + Result: true, + }, + { // No Volumes requested, some available + Node: nodes[0], + RequestedVolumes: noVolumes, + Result: true, + }, + { // Volumes requested, none available + Node: nodes[3], + RequestedVolumes: volumes, + Result: false, + }, + } for i, c := range cases { checker.SetVolumes(c.RequestedVolumes) @@ -1859,7 +1980,7 @@ func TestFeasibilityWrapper_JobIneligible(t *testing.T) { nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) mocked := newMockFeasibilityChecker(false) - wrapper := NewFeasibilityWrapper(ctx, static, 
[]FeasibilityChecker{mocked}, nil)
+	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil, nil)

 	// Set the job to ineligible
 	ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass)
@@ -1877,7 +1998,7 @@ func TestFeasibilityWrapper_JobEscapes(t *testing.T) {
 	nodes := []*structs.Node{mock.Node()}
 	static := NewStaticIterator(ctx, nodes)
 	mocked := newMockFeasibilityChecker(false)
-	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)
+	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil, nil)

 	// Set the job to escaped
 	cc := nodes[0].ComputedClass
@@ -1903,7 +2024,7 @@ func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) {
 	static := NewStaticIterator(ctx, nodes)
 	jobMock := newMockFeasibilityChecker(true)
 	tgMock := newMockFeasibilityChecker(false)
-	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
+	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil)

 	// Set the job to escaped
 	cc := nodes[0].ComputedClass
@@ -1925,7 +2046,7 @@ func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) {
 	static := NewStaticIterator(ctx, nodes)
 	jobMock := newMockFeasibilityChecker(true)
 	tgMock := newMockFeasibilityChecker(false)
-	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
+	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil)

 	// Set the job to escaped
 	cc := nodes[0].ComputedClass
@@ -1947,7 +2068,7 @@ func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
 	static := NewStaticIterator(ctx, nodes)
 	jobMock := newMockFeasibilityChecker(true)
 	tgMock := newMockFeasibilityChecker(true)
-	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
+	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil)

 	// Set the job to escaped
 	cc := nodes[0].ComputedClass
diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go
index 639b2b8cf..1b7b68635 100644
--- a/scheduler/scheduler.go
+++ b/scheduler/scheduler.go
@@ -91,6 +91,9 @@ type State interface {

 	// SchedulerConfig returns config options for the scheduler
 	SchedulerConfig() (uint64, *structs.SchedulerConfiguration, error)
+
+	// CSIVolumeByID fetches a CSI volume by namespace and ID, including its controller jobs
+	CSIVolumeByID(memdb.WatchSet, string, string) (*structs.CSIVolume, error)
 }

 // Planner interface is used to submit a task allocation plan.
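The `tgAvailable` checkers added above are evaluated on every pass rather than cached in the node's computed class, which is what lets a temporarily unhealthy plugin block the eval instead of permanently marking the class ineligible. As an illustration of that contract, the checker below is hypothetical (the real transient checker in this change is `CSIVolumeChecker`) and assumes the scheduler package's `Context` and `FeasibilityChecker` shapes as used in this diff:

```go
// PluginHealthChecker is a hypothetical transient FeasibilityChecker that
// filters nodes whose fingerprinted CSI node plugin is currently unhealthy.
type PluginHealthChecker struct {
	ctx      Context
	pluginID string
}

func (c *PluginHealthChecker) Feasible(n *structs.Node) bool {
	// Pass only if the node fingerprinted this plugin and it is healthy now.
	if plugin, ok := n.CSINodePlugins[c.pluginID]; ok && plugin.Healthy {
		return true
	}
	c.ctx.Metrics().FilterNode(n, FilterConstraintCSIPlugins)
	return false
}
```

Wired in the way the stacks below wire `CSIVolumeChecker`, such a checker would be passed via the new `tgAvailable` argument, e.g. `NewFeasibilityWrapper(ctx, source, jobs, tgs, []FeasibilityChecker{checker})`, so a transient failure never pollutes the computed-class cache.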
diff --git a/scheduler/stack.go b/scheduler/stack.go index 3e2b1b0b2..c673870b0 100644 --- a/scheduler/stack.go +++ b/scheduler/stack.go @@ -51,6 +51,7 @@ type GenericStack struct { taskGroupConstraint *ConstraintChecker taskGroupDevices *DeviceChecker taskGroupHostVolumes *HostVolumeChecker + taskGroupCSIVolumes *CSIVolumeChecker distinctHostsConstraint *DistinctHostsIterator distinctPropertyConstraint *DistinctPropertyIterator @@ -95,6 +96,7 @@ func (s *GenericStack) SetJob(job *structs.Job) { s.nodeAffinity.SetJob(job) s.spread.SetJob(job) s.ctx.Eligibility().SetJob(job) + s.taskGroupCSIVolumes.SetNamespace(job.Namespace) if contextual, ok := s.quota.(ContextualIterator); ok { contextual.SetJob(job) @@ -131,6 +133,7 @@ func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ra s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) s.taskGroupHostVolumes.SetVolumes(tg.Volumes) + s.taskGroupCSIVolumes.SetVolumes(tg.Volumes) s.distinctHostsConstraint.SetTaskGroup(tg) s.distinctPropertyConstraint.SetTaskGroup(tg) s.wrappedChecks.SetTaskGroup(tg.Name) @@ -174,6 +177,7 @@ type SystemStack struct { taskGroupConstraint *ConstraintChecker taskGroupDevices *DeviceChecker taskGroupHostVolumes *HostVolumeChecker + taskGroupCSIVolumes *CSIVolumeChecker distinctPropertyConstraint *DistinctPropertyIterator binPack *BinPackIterator @@ -205,6 +209,9 @@ func NewSystemStack(ctx Context) *SystemStack { // Filter on task group host volumes s.taskGroupHostVolumes = NewHostVolumeChecker(ctx) + // Filter on available, healthy CSI plugins + s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx) + // Filter on task group devices s.taskGroupDevices = NewDeviceChecker(ctx) @@ -213,8 +220,11 @@ func NewSystemStack(ctx Context) *SystemStack { // previously been marked as eligible or ineligible. Generally this will be // checks that only needs to examine the single node to determine feasibility. jobs := []FeasibilityChecker{s.jobConstraint} - tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, s.taskGroupHostVolumes, s.taskGroupDevices} - s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs) + tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, + s.taskGroupHostVolumes, + s.taskGroupDevices} + avail := []FeasibilityChecker{s.taskGroupCSIVolumes} + s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail) // Filter on distinct property constraints. 
s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.wrappedChecks) @@ -267,6 +277,7 @@ func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ran s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) s.taskGroupHostVolumes.SetVolumes(tg.Volumes) + s.taskGroupCSIVolumes.SetVolumes(tg.Volumes) s.wrappedChecks.SetTaskGroup(tg.Name) s.distinctPropertyConstraint.SetTaskGroup(tg) s.binPack.SetTaskGroup(tg) diff --git a/scheduler/stack_oss.go b/scheduler/stack_oss.go index 12caa98d9..50ff5a523 100644 --- a/scheduler/stack_oss.go +++ b/scheduler/stack_oss.go @@ -34,13 +34,20 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack { // Filter on task group host volumes s.taskGroupHostVolumes = NewHostVolumeChecker(ctx) + // Filter on available, healthy CSI plugins + s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx) + // Create the feasibility wrapper which wraps all feasibility checks in // which feasibility checking can be skipped if the computed node class has // previously been marked as eligible or ineligible. Generally this will be // checks that only needs to examine the single node to determine feasibility. jobs := []FeasibilityChecker{s.jobConstraint} - tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, s.taskGroupHostVolumes, s.taskGroupDevices} - s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs) + tgs := []FeasibilityChecker{s.taskGroupDrivers, + s.taskGroupConstraint, + s.taskGroupHostVolumes, + s.taskGroupDevices} + avail := []FeasibilityChecker{s.taskGroupCSIVolumes} + s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail) // Filter on distinct host constraints. s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks) diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 21ac18ee9..db12c5820 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -231,6 +231,79 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { } } +func TestServiceStack_Select_CSI(t *testing.T) { + state, ctx := testContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + } + + // Create a volume in the state store + index := uint64(999) + v := structs.NewCSIVolume("foo", index) + v.Namespace = structs.DefaultNamespace + v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + v.PluginID = "bar" + err := state.CSIVolumeRegister(999, []*structs.CSIVolume{v}) + require.NoError(t, err) + + // Create a node with healthy fingerprints for both controller and node plugins + zero := nodes[0] + zero.CSIControllerPlugins = map[string]*structs.CSIInfo{"bar": { + PluginID: "bar", + Healthy: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsListVolumes: true, + }, + }} + zero.CSINodePlugins = map[string]*structs.CSIInfo{"bar": { + PluginID: "bar", + Healthy: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: zero.ID, + MaxVolumes: 2, + AccessibleTopology: nil, + RequiresNodeStageVolume: false, + }, + }} + + // Add the node to the state store to index the healthy plugins and mark the volume "foo" healthy + err = state.UpsertNode(1000, zero) + require.NoError(t, err) + + // Use the node to build the stack and test + if err := zero.ComputeClass(); err != nil { + t.Fatalf("ComputedClass() failed: %v", err) + } + + stack := NewGenericStack(false, ctx) + 
stack.SetNodes(nodes) + + job := mock.Job() + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"foo": { + Name: "bar", + Type: structs.VolumeTypeCSI, + Source: "foo", + ReadOnly: true, + }} + + stack.SetJob(job) + + selectOptions := &SelectOptions{} + node := stack.Select(job.TaskGroups[0], selectOptions) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } + + if node.Node != zero { + t.Fatalf("bad") + } +} + func TestServiceStack_Select_ConstraintFilter(t *testing.T) { _, ctx := testContext(t) nodes := []*structs.Node{ diff --git a/vendor/github.com/container-storage-interface/spec/LICENSE b/vendor/github.com/container-storage-interface/spec/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go new file mode 100644 index 000000000..e2e02d808 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -0,0 +1,5728 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/container-storage-interface/spec/csi.proto + +package csi + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. 
The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", +} + +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} + +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0} +} + +type PluginCapability_VolumeExpansion_Type int32 + +const ( + PluginCapability_VolumeExpansion_UNKNOWN PluginCapability_VolumeExpansion_Type = 0 + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1 + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. 
+ PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2 +) + +var PluginCapability_VolumeExpansion_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ONLINE", + 2: "OFFLINE", +} + +var PluginCapability_VolumeExpansion_Type_value = map[string]int32{ + "UNKNOWN": 0, + "ONLINE": 1, + "OFFLINE": 2, +} + +func (x PluginCapability_VolumeExpansion_Type) String() string { + return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x)) +} + +func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} + +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} + +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 + ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. 
+ ControllerServiceCapability_RPC_CLONE_VOLUME ControllerServiceCapability_RPC_Type = 7 + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8 + // See VolumeExpansion for details. + ControllerServiceCapability_RPC_EXPAND_VOLUME ControllerServiceCapability_RPC_Type = 9 + // Indicates the SP supports the + // ListVolumesResponse.entry.published_nodes field + ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES ControllerServiceCapability_RPC_Type = 10 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", + 7: "CLONE_VOLUME", + 8: "PUBLISH_READONLY", + 9: "EXPAND_VOLUME", + 10: "LIST_VOLUMES_PUBLISHED_NODES", +} + +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, + "CLONE_VOLUME": 7, + "PUBLISH_READONLY": 8, + "EXPAND_VOLUME": 9, + "LIST_VOLUMES_PUBLISHED_NODES": 10, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} + +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{29, 0, 0} +} + +type VolumeUsage_Unit int32 + +const ( + VolumeUsage_UNKNOWN VolumeUsage_Unit = 0 + VolumeUsage_BYTES VolumeUsage_Unit = 1 + VolumeUsage_INODES VolumeUsage_Unit = 2 +) + +var VolumeUsage_Unit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BYTES", + 2: "INODES", +} + +var VolumeUsage_Unit_value = map[string]int32{ + "UNKNOWN": 0, + "BYTES": 1, + "INODES": 2, +} + +func (x VolumeUsage_Unit) String() string { + return proto.EnumName(VolumeUsage_Unit_name, int32(x)) +} + +func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{49, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + NodeServiceCapability_RPC_GET_VOLUME_STATS NodeServiceCapability_RPC_Type = 2 + // See VolumeExpansion for details. 
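+ // As a sketch (hasNodeCapability is a hypothetical helper), a CO
+ // would gate the corresponding RPC on the advertised capability:
+ //
+ //   if hasNodeCapability(caps, NodeServiceCapability_RPC_EXPAND_VOLUME) {
+ //       // only then is a NodeExpandVolume call expected to succeed
+ //   }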
+ NodeServiceCapability_RPC_EXPAND_VOLUME NodeServiceCapability_RPC_Type = 3 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", + 2: "GET_VOLUME_STATS", + 3: "EXPAND_VOLUME", +} + +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, + "GET_VOLUME_STATS": 2, + "EXPAND_VOLUME": 3, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} + +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52, 0, 0} +} + +type GetPluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{0} +} + +func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +} +func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) +} +func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(m, src) +} +func (m *GetPluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoRequest.Size(m) +} +func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo + +type GetPluginInfoResponse struct { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
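+ // An entry might look like (values illustrative only):
+ //
+ //   {"serialNumber": "1234567890", "buildDate": "2019-05-01"}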
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{1} +} + +func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +} +func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) +} +func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(m, src) +} +func (m *GetPluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoResponse.Size(m) +} +func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{2} +} + +func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src) +} +func (m *GetPluginCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +} +func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{3} +} + +func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src) +} +func (m *GetPluginCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) +} +func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. +type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + // *PluginCapability_VolumeExpansion_ + Type isPluginCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4} +} + +func (m *PluginCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability.Unmarshal(m, b) +} +func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) +} +func (m *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(m, src) +} +func (m *PluginCapability) XXX_Size() int { + return xxx_messageInfo_PluginCapability.Size(m) +} +func (m *PluginCapability) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability proto.InternalMessageInfo + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"` +} + +type PluginCapability_VolumeExpansion_ struct { + VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m 
*PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion { + if x, ok := m.GetType().(*PluginCapability_VolumeExpansion_); ok { + return x.VolumeExpansion + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*PluginCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*PluginCapability_Service_)(nil), + (*PluginCapability_VolumeExpansion_)(nil), + } +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 0} +} + +func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) +} +func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) +} +func (m *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(m, src) +} +func (m *PluginCapability_Service) XXX_Size() int { + return xxx_messageInfo_PluginCapability_Service.Size(m) +} +func (m *PluginCapability_Service) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type PluginCapability_VolumeExpansion struct { + Type PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_VolumeExpansion) Reset() { *m = PluginCapability_VolumeExpansion{} } +func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_VolumeExpansion) ProtoMessage() {} +func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 1} +} + +func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b) +} +func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic) +} +func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src) +} +func (m *PluginCapability_VolumeExpansion) XXX_Size() int { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m) +} +func (m *PluginCapability_VolumeExpansion) 
XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_VolumeExpansion.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_VolumeExpansion proto.InternalMessageInfo + +func (m *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type { + if m != nil { + return m.Type + } + return PluginCapability_VolumeExpansion_UNKNOWN +} + +type ProbeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{5} +} + +func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) +} +func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) +} +func (m *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(m, src) +} +func (m *ProbeRequest) XXX_Size() int { + return xxx_messageInfo_ProbeRequest.Size(m) +} +func (m *ProbeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo + +type ProbeResponse struct { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). 
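+ // A CO-side interpretation sketch (identityClient is assumed to be a
+ // client for the Identity service):
+ //
+ //   resp, err := identityClient.Probe(ctx, &ProbeRequest{})
+ //   switch {
+ //   case err != nil:
+ //       // case 1: unhealthy; the CO may restart the plugin
+ //   case resp.GetReady() != nil && !resp.GetReady().GetValue():
+ //       // case 2: still initializing; retry later
+ //   default:
+ //       // case 3 (or Ready omitted): treat the plugin as ready
+ //   }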
+ Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{6} +} + +func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) +} +func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) +} +func (m *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(m, src) +} +func (m *ProbeResponse) XXX_Size() int { + return xxx_messageInfo_ProbeResponse.Size(m) +} +func (m *ProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo + +func (m *ProbeResponse) GetReady() *wrappers.BoolValue { + if m != nil { + return m.Ready + } + return nil +} + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have. 
SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource,proto3" json:"volume_content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. 
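+ // Sketch: a CO requiring reachability from zone "Z2" might set
+ // (using the Topology and TopologyRequirement messages below):
+ //
+ //   req.AccessibilityRequirements = &TopologyRequirement{
+ //       Requisite: []*Topology{
+ //           {Segments: map[string]string{"region": "R1", "zone": "Z2"}},
+ //       },
+ //   }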
+ AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{7} +} + +func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) +} +func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) +} +func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(m, src) +} +func (m *CreateVolumeRequest) XXX_Size() int { + return xxx_messageInfo_CreateVolumeRequest.Size(m) +} +func (m *CreateVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if m != nil { + return m.VolumeContentSource + } + return nil +} + +func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if m != nil { + return m.AccessibilityRequirements + } + return nil +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. 
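+// For example (sketch), pre-populating from a snapshot uses the
+// snapshot arm of the oneof:
+//
+//   src := &VolumeContentSource{
+//       Type: &VolumeContentSource_Snapshot{
+//           Snapshot: &VolumeContentSource_SnapshotSource{SnapshotId: "snap-1"},
+//       },
+//   }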
+type VolumeContentSource struct { + // Types that are valid to be assigned to Type: + // *VolumeContentSource_Snapshot + // *VolumeContentSource_Volume + Type isVolumeContentSource_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } +func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource) ProtoMessage() {} +func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8} +} + +func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) +} +func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(m, src) +} +func (m *VolumeContentSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource.Size(m) +} +func (m *VolumeContentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo + +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() +} + +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"` +} + +type VolumeContentSource_Volume struct { + Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"` +} + +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} + +func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {} + +func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { + return x.Snapshot + } + return nil +} + +func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { + if x, ok := m.GetType().(*VolumeContentSource_Volume); ok { + return x.Volume + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*VolumeContentSource) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + (*VolumeContentSource_Volume)(nil), + } +} + +type VolumeContentSource_SnapshotSource struct { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } +func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8, 0} +} + +func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) +} +func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo + +func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type VolumeContentSource_VolumeSource struct { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_VolumeSource) Reset() { *m = VolumeContentSource_VolumeSource{} } +func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_VolumeSource) ProtoMessage() {} +func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8, 1} +} + +func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src) +} +func (m *VolumeContentSource_VolumeSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m) +} +func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo + +func (m *VolumeContentSource_VolumeSource) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{9} +} + +func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) +} +func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) +} +func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(m, src) +} +func (m *CreateVolumeResponse) XXX_Size() int { + return xxx_messageInfo_CreateVolumeResponse.Size(m) +} +func (m *CreateVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. + AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10} +} + +func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) +} +func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +} +func (m *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(m, src) +} +func (m *VolumeCapability) XXX_Size() int { + return xxx_messageInfo_VolumeCapability.Size(m) +} +func (m *VolumeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"` +} + +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} + +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +// Indicate that the volume will be accessed via the block device API. 
+type VolumeCapability_BlockVolume struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 0} +} + +func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) +} +func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src) +} +func (m *VolumeCapability_BlockVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) +} +func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
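+ // As an illustration (a sketch, not generated code), a filesystem
+ // capability with a single-writer access mode would be assembled as:
+ //
+ //   vc := &VolumeCapability{
+ //       AccessType: &VolumeCapability_Mount{
+ //           Mount: &VolumeCapability_MountVolume{FsType: "ext4"},
+ //       },
+ //       AccessMode: &VolumeCapability_AccessMode{
+ //           Mode: VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+ //       },
+ //   }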
+ MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} }
+func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_MountVolume) ProtoMessage() {}
+func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{10, 1}
+}
+
+func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b)
+}
+func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src)
+}
+func (m *VolumeCapability_MountVolume) XXX_Size() int {
+ return xxx_messageInfo_VolumeCapability_MountVolume.Size(m)
+}
+func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo
+
+func (m *VolumeCapability_MountVolume) GetFsType() string {
+ if m != nil {
+ return m.FsType
+ }
+ return ""
+}
+
+func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
+ if m != nil {
+ return m.MountFlags
+ }
+ return nil
+}
+
+// Specify how a volume can be accessed.
+type VolumeCapability_AccessMode struct {
+ // This field is REQUIRED.
+ Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} }
+func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_AccessMode) ProtoMessage() {}
+func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{10, 2}
+}
+
+func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b)
+}
+func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic)
+}
+func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src)
+}
+func (m *VolumeCapability_AccessMode) XXX_Size() int {
+ return xxx_messageInfo_VolumeCapability_AccessMode.Size(m)
+}
+func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() {
+ xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo
+
+func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
+ if m != nil {
+ return m.Mode
+ }
+ return VolumeCapability_AccessMode_UNKNOWN
+}
+
+// The capacity of the storage space in bytes. To specify an exact size,
+// `required_bytes` and `limit_bytes` SHALL be set to the same value. At
+// least one of these fields MUST be specified.
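+// For instance (sketch), requesting exactly 10 GiB:
+//
+//   tenGiB := int64(10) * 1024 * 1024 * 1024
+//   cr := &CapacityRange{RequiredBytes: tenGiB, LimitBytes: tenGiB}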
+type CapacityRange struct { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes,proto3" json:"required_bytes,omitempty"` + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{11} +} + +func (m *CapacityRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapacityRange.Unmarshal(m, b) +} +func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) +} +func (m *CapacityRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRange.Merge(m, src) +} +func (m *CapacityRange) XXX_Size() int { + return xxx_messageInfo_CapacityRange.Size(m) +} +func (m *CapacityRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRange proto.InternalMessageInfo + +func (m *CapacityRange) GetRequiredBytes() int64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() int64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// Information about a specific volume. +type Volume struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. 
+ // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + VolumeContext map[string]string `protobuf:"bytes,3,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource,proto3" json:"content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{12} +} + +func (m *Volume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Volume.Unmarshal(m, b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return xxx_messageInfo_Volume.Size(m) +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *Volume) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *Volume) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *Volume) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *Volume) GetContentSource() *VolumeContentSource { + if m != nil { + return m.ContentSource + } + return nil +} + +func (m *Volume) GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type TopologyRequirement struct { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. 
If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // If requisite is specified, the provisioned volume MUST be
+ // accessible from at least one of the requisite topologies.
+ //
+ // Given
+ //   x = number of topologies provisioned volume is accessible from
+ //   n = number of requisite topologies
+ // The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+ // If x==n, then the SP MUST make the provisioned volume available to
+ // all topologies from the list of requisite topologies. If it is
+ // unable to do so, the SP MUST fail the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ //   {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2".
+ // Similarly, if a volume should be accessible from two zones, and
+ // requisite =
+ //   {"region": "R1", "zone": "Z2"},
+ //   {"region": "R1", "zone": "Z3"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and both "zone" "Z2" and "zone" "Z3".
+ //
+ // If x<n, then the SP SHALL choose x unique topologies from the list
+ // of requisite topologies. If it is unable to do so, the SP MUST fail
+ // the CreateVolume call.
+ // For example, if a volume should be accessible from a single zone,
+ // and requisite =
+ //   {"region": "R1", "zone": "Z2"},
+ //   {"region": "R1", "zone": "Z3"}
+ // then the SP may choose to make the provisioned volume available in
+ // either the "zone" "Z2" or the "zone" "Z3" in the "region" "R1".
+ //
+ // If x>n, then the SP MUST make the provisioned volume available from
+ // all topologies from the list of requisite topologies and MAY choose
+ // the remaining x-n unique topologies from the list of all possible
+ // topologies. If it is unable to do so, the SP MUST fail the
+ // CreateVolume call.
+ // For example, if a volume should be accessible from two zones, and
+ // requisite =
+ //   {"region": "R1", "zone": "Z2"}
+ // then the provisioned volume MUST be accessible from the "region"
+ // "R1" and the "zone" "Z2" and the SP may select the second zone
+ // independently, e.g. "R1/Z4".
+ Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite,proto3" json:"requisite,omitempty"`
+ // Specifies the list of topologies the CO would prefer the volume to
+ // be provisioned in.
+ //
+ // This field is OPTIONAL. If TopologyRequirement is specified either
+ // requisite or preferred or both MUST be specified.
+ //
+ // An SP MUST attempt to make the provisioned volume available using
+ // the preferred topologies in order from first to last.
+ //
+ // If requisite is specified, all topologies in preferred list MUST
+ // also be present in the list of requisite topologies.
+ //
+ // If the SP is unable to make the provisioned volume available
+ // from any of the preferred topologies, the SP MAY choose a topology
+ // from the list of requisite topologies.
+ // If the list of requisite topologies is not specified, then the SP
+ // MAY choose from the list of all possible topologies.
+ // If the list of requisite topologies is specified and the SP is
+ // unable to make the provisioned volume available from any of the
+ // requisite topologies it MUST fail the CreateVolume call.
+ //
+ // Example 1:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ //   {"region": "R1", "zone": "Z2"},
+ //   {"region": "R1", "zone": "Z3"}
+ // preferred =
+ //   {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // available from "zone" "Z3" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible.
+ //
+ // Example 2:
+ // Given a volume should be accessible from a single zone, and
+ // requisite =
+ //   {"region": "R1", "zone": "Z2"},
+ //   {"region": "R1", "zone": "Z3"},
+ //   {"region": "R1", "zone": "Z4"},
+ //   {"region": "R1", "zone": "Z5"}
+ // preferred =
+ //   {"region": "R1", "zone": "Z4"},
+ //   {"region": "R1", "zone": "Z2"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from "zone" "Z4" in the "region" "R1" and fall back to
+ // "zone" "Z2" in the "region" "R1" if that is not possible. If that
+ // is not possible, the SP may choose either the "zone"
+ // "Z3" or "Z5" in the "region" "R1".
+ //
+ // Example 3:
+ // Given a volume should be accessible from TWO zones (because an
+ // opaque parameter in CreateVolumeRequest, for example, specifies
+ // the volume is accessible from two zones, aka synchronously
+ // replicated), and
+ // requisite =
+ //   {"region": "R1", "zone": "Z2"},
+ //   {"region": "R1", "zone": "Z3"},
+ //   {"region": "R1", "zone": "Z4"},
+ //   {"region": "R1", "zone": "Z5"}
+ // preferred =
+ //   {"region": "R1", "zone": "Z5"},
+ //   {"region": "R1", "zone": "Z3"}
+ // then the SP SHOULD first attempt to make the provisioned volume
+ // accessible from the combination of the two "zones" "Z5" and "Z3" in
+ // the "region" "R1". If that's not possible, it should fall back to
+ // a combination of "Z5" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of "Z3" and other possibilities from the list of
+ // requisite. If that's not possible, it should fall back to a
+ // combination of other possibilities from the list of requisite.
+ Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} }
+func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) }
+func (*TopologyRequirement) ProtoMessage() {}
+func (*TopologyRequirement) Descriptor() ([]byte, []int) {
+ return fileDescriptor_9cdb00adce470e01, []int{13}
+}
+
+func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b)
+}
+func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic)
+}
+func (m *TopologyRequirement) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TopologyRequirement.Merge(m, src)
+}
+func (m *TopologyRequirement) XXX_Size() int {
+ return xxx_messageInfo_TopologyRequirement.Size(m)
+}
+func (m *TopologyRequirement) XXX_DiscardUnknown() {
+ xxx_messageInfo_TopologyRequirement.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo
+
+func (m *TopologyRequirement) GetRequisite() []*Topology {
+ if m != nil {
+ return m.Requisite
+ }
+ return nil
+}
+
+func (m *TopologyRequirement) GetPreferred() []*Topology {
+ if m != nil {
+ return m.Preferred
+ }
+ return nil
+}
+
+// Topology is a map of topological domains to topological segments.
+// A topological domain is a sub-division of a cluster, like "region",
+// "zone", "rack", etc.
+// A topological segment is a specific instance of a topological domain,
+// like "zone3", "rack3", etc.
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Topology) Reset() { *m = Topology{} } +func (m *Topology) String() string { return proto.CompactTextString(m) } +func (*Topology) ProtoMessage() {} +func (*Topology) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{14} +} + +func (m *Topology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Topology.Unmarshal(m, b) +} +func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Topology.Marshal(b, m, deterministic) +} +func (m *Topology) XXX_Merge(src proto.Message) { + xxx_messageInfo_Topology.Merge(m, src) +} +func (m *Topology) XXX_Size() int { + return xxx_messageInfo_Topology.Size(m) +} +func (m *Topology) XXX_DiscardUnknown() { + xxx_messageInfo_Topology.DiscardUnknown(m) +} + +var xxx_messageInfo_Topology proto.InternalMessageInfo + +func (m *Topology) GetSegments() map[string]string { + if m != nil { + return m.Segments + } + return nil +} + +type DeleteVolumeRequest struct { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
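+ // A deletion request sketch (secret values illustrative only):
+ //
+ //   req := &DeleteVolumeRequest{
+ //       VolumeId: "vol-1",
+ //       Secrets:  map[string]string{"password": "example-secret"},
+ //   }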
+ Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{15} +} + +func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) +} +func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +} +func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(m, src) +} +func (m *DeleteVolumeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeRequest.Size(m) +} +func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{16} +} + +func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) +} +func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +} +func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(m, src) +} +func (m *DeleteVolumeResponse) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeResponse.Size(m) +} +func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. 
+	VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"`
+	// Indicates SP MUST publish the volume in readonly mode.
+	// CO MUST set this field to false if SP does not have the
+	// PUBLISH_READONLY controller capability.
+	// This is a REQUIRED field.
+	Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	// Secrets required by plugin to complete controller publish volume
+	// request. This field is OPTIONAL. Refer to the
+	// `Secrets Requirements` section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Volume context as returned by CO in CreateVolumeRequest. This field
+	// is OPTIONAL and MUST match the volume_context of the volume
+	// identified by `volume_id`.
+	VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} }
+func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeRequest) ProtoMessage() {}
+func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{17}
+}
+
+func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src)
+}
+func (m *ControllerPublishVolumeRequest) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m)
+}
+func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
+	if m != nil {
+		return m.VolumeCapability
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetReadonly() bool {
+	if m != nil {
+		return m.Readonly
+	}
+	return false
+}
+
+func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string {
+	if m != nil {
+		return m.VolumeContext
+	}
+	return nil
+}
+
+type ControllerPublishVolumeResponse struct {
+	// Opaque static publish properties of the volume. SP MAY use this
+	// field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+	// calls have contextual information.
+	// The contents of this field SHALL be opaque to a CO.
+	// The contents of this field SHALL NOT be mutable.
+	// The contents of this field SHALL be safe for the CO to cache.
+	// The contents of this field SHOULD NOT contain sensitive
+	// information.
+	// The contents of this field SHOULD NOT be used for uniquely
+	// identifying a volume. The `volume_id` alone SHOULD be sufficient to
+	// identify the volume.
+	// This field is OPTIONAL and when present MUST be passed to
+	// subsequent `NodeStageVolume` or `NodePublishVolume` calls.
+	PublishContext map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} }
+func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerPublishVolumeResponse) ProtoMessage() {}
+func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{18}
+}
+
+func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src)
+}
+func (m *ControllerPublishVolumeResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m)
+}
+func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo
+
+func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string {
+	if m != nil {
+		return m.PublishContext
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeRequest struct {
+	// The ID of the volume. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"`
+	// The ID of the node. This field is OPTIONAL. The CO SHOULD set this
+	// field to match the node ID returned by `NodeGetInfo` or leave it
+	// unset. If the value is set, the SP MUST unpublish the volume from
+	// the specified node. If the value is unset, the SP MUST unpublish
+	// the volume from all nodes it is published to.
+	NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	// Secrets required by plugin to complete controller unpublish volume
+	// request. This SHOULD be the same secrets passed to the
+	// ControllerPublishVolume call for the specified volume.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{19} +} + +func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) +} +func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{20} +} + +func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +} +func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo + +type ValidateVolumeCapabilitiesRequest struct { + // The ID of the volume to check. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,2,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{21} +} + +func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + 
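+// Example (editorial sketch, not part of the generated bindings): a CO
+// could use the request/response pair above to check whether a plugin
+// supports a set of capabilities before publishing a volume. This
+// assumes a connected ControllerClient `cc` (generated later in this
+// file) and hypothetical `ctx`, `volID`, and `caps` values:
+//
+//	resp, err := cc.ValidateVolumeCapabilities(ctx,
+//		&ValidateVolumeCapabilitiesRequest{
+//			VolumeId:           volID,
+//			VolumeCapabilities: caps,
+//		})
+//	if err != nil {
+//		return err // gRPC error from the plugin
+//	}
+//	if resp.GetConfirmed() == nil {
+//		// The plugin did not confirm the capabilities; the message
+//		// field, when set, explains why.
+//		return fmt.Errorf("unsupported capabilities: %s", resp.GetMessage())
+//	}
+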
+type ValidateVolumeCapabilitiesResponse struct { + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed *ValidateVolumeCapabilitiesResponse_Confirmed `protobuf:"bytes,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{22} +} + +func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed { + if m != nil { + return m.Confirmed + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ValidateVolumeCapabilitiesResponse_Confirmed struct { + // Volume context validated by the plugin. + // This field is OPTIONAL. + VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() { + *m = ValidateVolumeCapabilitiesResponse_Confirmed{} +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { + return proto.CompactTextString(m) +} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{22, 0} +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{23} +} + +func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) +} +func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) +} +func (m *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(m, src) +} +func (m *ListVolumesRequest) XXX_Size() int { + return xxx_messageInfo_ListVolumesRequest.Size(m) +} +func (m *ListVolumesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+	NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} }
+func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse) ProtoMessage() {}
+func (*ListVolumesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24}
+}
+
+func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse.Merge(m, src)
+}
+func (m *ListVolumesResponse) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse.Size(m)
+}
+func (m *ListVolumesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo
+
+func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse) GetNextToken() string {
+	if m != nil {
+		return m.NextToken
+	}
+	return ""
+}
+
+type ListVolumesResponse_VolumeStatus struct {
+	// A list of all `node_id` of nodes that the volume in this entry
+	// is controller published on.
+	// This field is OPTIONAL. If it is not specified and the SP has
+	// the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO
+	// MAY assume the volume is not controller published to any nodes.
+	// If the field is not specified and the SP does not have the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST
+	// NOT interpret this field.
+	// published_node_ids MAY include nodes not published to or
+	// reported by the SP. The CO MUST be resilient to that.
+	PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListVolumesResponse_VolumeStatus) Reset() { *m = ListVolumesResponse_VolumeStatus{} }
+func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_VolumeStatus) ProtoMessage() {}
+func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 0}
+}
+
+func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m)
+}
+func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string {
+	if m != nil {
+		return m.PublishedNodeIds
+	}
+	return nil
+}
+
+type ListVolumesResponse_Entry struct {
+	// This field is REQUIRED.
+	Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"`
+	// This field is OPTIONAL. This field MUST be specified if the
+	// LIST_VOLUMES_PUBLISHED_NODES controller capability is
+	// supported.
+	Status *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} }
+func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) }
+func (*ListVolumesResponse_Entry) ProtoMessage() {}
+func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{24, 1}
+}
+
+func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b)
+}
+func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic)
+}
+func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src)
+}
+func (m *ListVolumesResponse_Entry) XXX_Size() int {
+	return xxx_messageInfo_ListVolumesResponse_Entry.Size(m)
+}
+func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo
+
+func (m *ListVolumesResponse_Entry) GetVolume() *Volume {
+	if m != nil {
+		return m.Volume
+	}
+	return nil
+}
+
+func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+type GetCapacityRequest struct {
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that satisfy ALL of the
+	// specified `volume_capabilities`. These are the same
+	// `volume_capabilities` the CO will use in `CreateVolumeRequest`.
+	// This field is OPTIONAL.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes with the given Plugin
+	// specific `parameters`. These are the same `parameters` the CO will
+	// use in `CreateVolumeRequest`. This field is OPTIONAL.
+	Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// If specified, the Plugin SHALL report the capacity of the storage
+	// that can be used to provision volumes that are accessible from the
+	// specified `accessible_topology`. This is the same as the
+	// `accessible_topology` the CO returns in a `CreateVolumeResponse`.
+	// This field is OPTIONAL. This field SHALL NOT be set unless the
+	// plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
+ AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{25} +} + +func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) +} +func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) +} +func (m *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(m, src) +} +func (m *GetCapacityRequest) XXX_Size() int { + return xxx_messageInfo_GetCapacityRequest.Size(m) +} +func (m *GetCapacityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. 
+ AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity,proto3" json:"available_capacity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{26} +} + +func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) +} +func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +} +func (m *GetCapacityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(m, src) +} +func (m *GetCapacityResponse) XXX_Size() int { + return xxx_messageInfo_GetCapacityResponse.Size(m) +} +func (m *GetCapacityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{27} +} + +func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) +} +func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+	Capabilities []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} }
+func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerGetCapabilitiesResponse) ProtoMessage() {}
+func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{28}
+}
+
+func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_Size() int {
+	return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m)
+}
+func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo
+
+func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+// Specifies a capability of the controller service.
+type ControllerServiceCapability struct {
+	// Types that are valid to be assigned to Type:
+	//	*ControllerServiceCapability_Rpc
+	Type isControllerServiceCapability_Type `protobuf_oneof:"type"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} }
+func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability) ProtoMessage() {}
+func (*ControllerServiceCapability) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{29}
+}
+
+func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability.Merge(m, src)
+}
+func (m *ControllerServiceCapability) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability.Size(m)
+}
+func (m *ControllerServiceCapability) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo
+
+type isControllerServiceCapability_Type interface {
+	isControllerServiceCapability_Type()
+}
+
+type ControllerServiceCapability_Rpc struct {
+	Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"`
+}
+
+func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {}
+
+func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC {
+	if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok {
+		return x.Rpc
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ControllerServiceCapability_Rpc)(nil),
+	}
+}
+
+type ControllerServiceCapability_RPC struct {
+	Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} }
+func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
+func (*ControllerServiceCapability_RPC) ProtoMessage() {}
+func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{29, 0}
+}
+
+func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src)
+}
+func (m *ControllerServiceCapability_RPC) XXX_Size() int {
+	return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m)
+}
+func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() {
+	xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo
+
+func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return ControllerServiceCapability_RPC_UNKNOWN
+}
+
+type CreateSnapshotRequest struct {
+	// The ID of the source volume to be snapshotted.
+	// This field is REQUIRED.
+	SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// The suggested name for the snapshot. This field is REQUIRED for
+	// idempotency.
+	// Any Unicode string that conforms to the length limit is allowed
+	// except those containing the following banned characters:
+	// U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+	// (These are control characters other than commonly used whitespace.)
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Secrets required by plugin to complete snapshot creation request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Plugin specific parameters passed in as opaque key-value pairs.
+	// This field is OPTIONAL. The Plugin is responsible for parsing and
+	// validating these parameters. COs will treat these as opaque.
+ // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{30} +} + +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(m, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateSnapshotRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type CreateSnapshotResponse struct { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. 
+ Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } +func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotResponse) ProtoMessage() {} +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{31} +} + +func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) +} +func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) +} +func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(m, src) +} +func (m *CreateSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotResponse.Size(m) +} +func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo + +func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// Information about a specific snapshot. +type Snapshot struct { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. 
+ ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{32} +} + +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +func (m *Snapshot) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *Snapshot) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *Snapshot) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Snapshot) GetReadyToUse() bool { + if m != nil { + return m.ReadyToUse + } + return false +} + +type DeleteSnapshotRequest struct { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{33} +} + +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *DeleteSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotResponse) ProtoMessage() {} +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{34} +} + +func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) +} +func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +} +func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src) +} +func (m *DeleteSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotResponse.Size(m) +} +func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo + +// List all snapshots on the storage system regardless of how they were +// created. +type ListSnapshotsRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. 
+	MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// A token to specify where to start paginating. Set this field to
+	// `next_token` returned by a previous `ListSnapshots` call to get the
+	// next page of entries. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"`
+	// Identity information for the source volume. This field is OPTIONAL.
+	// It can be used to list snapshots by volume.
+	SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"`
+	// Identity information for a specific snapshot. This field is
+	// OPTIONAL. It can be used to list only a specific snapshot.
+	// ListSnapshots will return with current snapshot information
+	// and will not block if the snapshot is being processed after
+	// it is cut.
+	SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"`
+	// Secrets required by plugin to complete ListSnapshots request.
+	// This field is OPTIONAL. Refer to the `Secrets Requirements`
+	// section on how to use this field.
+	Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} }
+func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListSnapshotsRequest) ProtoMessage() {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_9cdb00adce470e01, []int{35}
+}
+
+func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b)
+}
+func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic)
+}
+func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListSnapshotsRequest.Merge(m, src)
+}
+func (m *ListSnapshotsRequest) XXX_Size() int {
+	return xxx_messageInfo_ListSnapshotsRequest.Size(m)
+}
+func (m *ListSnapshotsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo
+
+func (m *ListSnapshotsRequest) GetMaxEntries() int32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *ListSnapshotsRequest) GetStartingToken() string {
+	if m != nil {
+		return m.StartingToken
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSourceVolumeId() string {
+	if m != nil {
+		return m.SourceVolumeId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSnapshotId() string {
+	if m != nil {
+		return m.SnapshotId
+	}
+	return ""
+}
+
+func (m *ListSnapshotsRequest) GetSecrets() map[string]string {
+	if m != nil {
+		return m.Secrets
+	}
+	return nil
+}
+
+type ListSnapshotsResponse struct {
+	Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	// This token allows you to get the next page of entries for
+	// `ListSnapshots` request. If the number of entries is larger than
If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{36} +} + +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } +func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{36, 0} +} + +func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) +} +func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src) +} +func (m *ListSnapshotsResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) +} +func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo + +func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type ControllerExpandVolumeRequest struct { + // The ID of the volume to expand. This field is REQUIRED. 
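The `max_entries` / `starting_token` / `next_token` fields above are plain cursor pagination. A minimal sketch of how a CO-side caller might drain every page, assuming the `ControllerClient` generated elsewhere in this file, the upstream `github.com/container-storage-interface/spec/lib/go/csi` import path, and an already-dialed gRPC connection; the page size of 100 is arbitrary:

package example

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi"
	"google.golang.org/grpc"
)

// listAllSnapshots drains every page of ListSnapshots by feeding each
// response's next_token back as the next request's starting_token.
func listAllSnapshots(ctx context.Context, cc *grpc.ClientConn) ([]*csi.Snapshot, error) {
	client := csi.NewControllerClient(cc)
	var out []*csi.Snapshot
	token := "" // empty string means "start from the beginning"
	for {
		resp, err := client.ListSnapshots(ctx, &csi.ListSnapshotsRequest{
			MaxEntries:    100, // per-page cap; 0 would mean "no restriction"
			StartingToken: token,
		})
		if err != nil {
			return nil, err
		}
		for _, e := range resp.GetEntries() {
			out = append(out, e.GetSnapshot())
		}
		token = resp.GetNextToken()
		if token == "" { // an empty next_token signals the last page
			break
		}
	}
	return out, nil
}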
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is + // being used as a block device - the SP MAY set + // node_expansion_required to false in ControllerExpandVolumeResponse + // to skip invocation of NodeExpandVolume on the node by the CO. + // This is an OPTIONAL field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerExpandVolumeRequest) Reset() { *m = ControllerExpandVolumeRequest{} } +func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerExpandVolumeRequest) ProtoMessage() {} +func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{37} +} + +func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic) +} +func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src) +} +func (m *ControllerExpandVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m) +} +func (m *ControllerExpandVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerExpandVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerExpandVolumeRequest proto.InternalMessageInfo + +func (m *ControllerExpandVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +type ControllerExpandVolumeResponse struct { + // Capacity of volume after expansion. This field is REQUIRED. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. 
+ NodeExpansionRequired bool `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerExpandVolumeResponse) Reset() { *m = ControllerExpandVolumeResponse{} } +func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerExpandVolumeResponse) ProtoMessage() {} +func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{38} +} + +func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic) +} +func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src) +} +func (m *ControllerExpandVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m) +} +func (m *ControllerExpandVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerExpandVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerExpandVolumeResponse proto.InternalMessageInfo + +func (m *ControllerExpandVolumeResponse) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool { + if m != nil { + return m.NodeExpansionRequired + } + return false +} + +type NodeStageVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. 
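`node_expansion_required` above is what couples the controller and node halves of volume expansion. A sketch of how a CO might honor it (not Nomad's actual call path), continuing with the imports from the pagination sketch above; `volPath` is a hypothetical path where the volume is published on the node:

// expandVolume grows a volume via the controller service, then runs the
// node-side expansion only when the plugin says it is required.
func expandVolume(ctx context.Context, ctrl csi.ControllerClient, node csi.NodeClient,
	volID, volPath string, newBytes int64) error {

	resp, err := ctrl.ControllerExpandVolume(ctx, &csi.ControllerExpandVolumeRequest{
		VolumeId:      volID,
		CapacityRange: &csi.CapacityRange{RequiredBytes: newBytes},
	})
	if err != nil {
		return err
	}
	// Per the comment above: when node_expansion_required is true the CO
	// MUST also call NodeExpandVolume on the node.
	if resp.GetNodeExpansionRequired() {
		_, err = node.NodeExpandVolume(ctx, &csi.NodeExpandVolumeRequest{
			VolumeId:      volID,
			VolumePath:    volPath,
			CapacityRange: &csi.CapacityRange{RequiredBytes: newBytes},
		})
	}
	return err
}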
+ VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{39} +} + +func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src) +} +func (m *NodeStageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +} +func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodeStageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{40} +} + +func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeStageVolumeResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src) +} +func (m *NodeStageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeResponse.Size(m) +} +func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{41} +} + +func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src) +} +func (m *NodeUnstageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) +} +func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{42} +} + +func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src) +} +func (m *NodeUnstageVolumeResponse) 
XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) +} +func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,7,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ VolumeContext map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{43} +} + +func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src) +} +func (m *NodePublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeRequest.Size(m) +} +func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodePublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{44} +} + +func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src) +} +func (m *NodePublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeResponse.Size(m) +} +func 
(m *NodePublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{45} +} + +func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src) +} +func (m *NodeUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) +} +func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{46} +} + +func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src) +} +func (m *NodeUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) +} +func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo + 
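The four node RPCs defined above are ordered: stage the volume once per node at `staging_target_path`, publish it per workload at `target_path`, then tear down in reverse. A condensed sketch of that sequence for a plugin that advertises staging, continuing with the same imports; the two paths and the ext4 mount capability are illustrative, not values Nomad uses:

// mountAndUnmount walks a volume through the full node lifecycle:
// NodeStageVolume -> NodePublishVolume -> NodeUnpublishVolume -> NodeUnstageVolume.
func mountAndUnmount(ctx context.Context, node csi.NodeClient, volID string) error {
	staging := "/csi/staging/vol-1"  // hypothetical node-wide staging dir
	target := "/csi/per-alloc/vol-1" // hypothetical per-workload mount point
	vcap := &csi.VolumeCapability{
		AccessType: &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
		},
	}

	if _, err := node.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
		VolumeId: volID, StagingTargetPath: staging, VolumeCapability: vcap,
	}); err != nil {
		return err
	}
	if _, err := node.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		VolumeId: volID, StagingTargetPath: staging, TargetPath: target,
		VolumeCapability: vcap, Readonly: false,
	}); err != nil {
		return err
	}
	// Teardown happens in the reverse order.
	if _, err := node.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{
		VolumeId: volID, TargetPath: target,
	}); err != nil {
		return err
	}
	_, err := node.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{
		VolumeId: volID, StagingTargetPath: staging,
	})
	return err
}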
+type NodeGetVolumeStatsRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsRequest) Reset() { *m = NodeGetVolumeStatsRequest{} } +func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsRequest) ProtoMessage() {} +func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{47} +} + +func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src) +} +func (m *NodeGetVolumeStatsRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m) +} +func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeGetVolumeStatsRequest) GetVolumePath() string { + if m != nil { + return m.VolumePath + } + return "" +} + +func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeGetVolumeStatsResponse struct { + // This field is OPTIONAL. 
+ Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsResponse) Reset() { *m = NodeGetVolumeStatsResponse{} } +func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsResponse) ProtoMessage() {} +func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{48} +} + +func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src) +} +func (m *NodeGetVolumeStatsResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m) +} +func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage { + if m != nil { + return m.Usage + } + return nil +} + +type VolumeUsage struct { + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"` + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + // Units by which values are measured. This field is REQUIRED. 
+ Unit VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeUsage) Reset() { *m = VolumeUsage{} } +func (m *VolumeUsage) String() string { return proto.CompactTextString(m) } +func (*VolumeUsage) ProtoMessage() {} +func (*VolumeUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{49} +} + +func (m *VolumeUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeUsage.Unmarshal(m, b) +} +func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic) +} +func (m *VolumeUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeUsage.Merge(m, src) +} +func (m *VolumeUsage) XXX_Size() int { + return xxx_messageInfo_VolumeUsage.Size(m) +} +func (m *VolumeUsage) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo + +func (m *VolumeUsage) GetAvailable() int64 { + if m != nil { + return m.Available + } + return 0 +} + +func (m *VolumeUsage) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VolumeUsage) GetUsed() int64 { + if m != nil { + return m.Used + } + return 0 +} + +func (m *VolumeUsage) GetUnit() VolumeUsage_Unit { + if m != nil { + return m.Unit + } + return VolumeUsage_UNKNOWN +} + +type NodeGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{50} +} + +func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src) +} +func (m *NodeGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) +} +func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. 
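Because `VolumeUsage` reports capacity per unit, a single `NodeGetVolumeStats` response may carry one entry measured in bytes and another in inodes. A small sketch of summarizing it, assuming the `BYTES` and `INODES` values of the `csi.v1.VolumeUsage_Unit` enum registered later in this file, plus `fmt` and the imports used above:

// usageSummary renders each VolumeUsage entry as "<used>/<total> <unit>".
func usageSummary(resp *csi.NodeGetVolumeStatsResponse) []string {
	var lines []string
	for _, u := range resp.GetUsage() {
		unit := "unknown"
		switch u.GetUnit() {
		case csi.VolumeUsage_BYTES:
			unit = "bytes"
		case csi.VolumeUsage_INODES:
			unit = "inodes"
		}
		// total is REQUIRED and non-negative; available/used are OPTIONAL
		// and default to zero when the plugin omits them.
		lines = append(lines, fmt.Sprintf("%d/%d %s", u.GetUsed(), u.GetTotal(), unit))
	}
	return lines
}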
+ Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{51} +} + +func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src) +} +func (m *NodeGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +} +func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. +type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52} +} + +func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) +} +func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) +} +func (m *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(m, src) +} +func (m *NodeServiceCapability) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability.Size(m) +} +func (m *NodeServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofWrappers is for the internal use of 
the proto package. +func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52, 0} +} + +func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) +} +func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src) +} +func (m *NodeServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) +} +func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +type NodeGetInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } +func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoRequest) ProtoMessage() {} +func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{53} +} + +func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) +} +func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoRequest.Merge(m, src) +} +func (m *NodeGetInfoRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoRequest.Size(m) +} +func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo + +type NodeGetInfoResponse struct { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Maximum number of volumes that controller can publish to the node. 
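Whether the staging RPCs may be called at all is advertised through `NodeGetCapabilities`, and the oneof accessors above (`GetRpc`, `GetType`) are how callers unpack the answer. A sketch of gating on staging support, assuming the `STAGE_UNSTAGE_VOLUME` value of the `csi.v1.NodeServiceCapability_RPC_Type` enum registered later in this file:

// supportsStaging reports whether the node plugin advertises
// STAGE_UNSTAGE_VOLUME, i.e. whether NodeStageVolume/NodeUnstageVolume
// must bracket NodePublishVolume/NodeUnpublishVolume.
func supportsStaging(ctx context.Context, node csi.NodeClient) (bool, error) {
	resp, err := node.NodeGetCapabilities(ctx, &csi.NodeGetCapabilitiesRequest{})
	if err != nil {
		return false, err
	}
	for _, c := range resp.GetCapabilities() {
		// GetRpc returns nil when a different oneof member is set, and the
		// generated getters are nil-safe, so this chain never panics.
		if c.GetRpc().GetType() == csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
			return true, nil
		}
	}
	return false, nil
}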
+ // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode,proto3" json:"max_volumes_per_node,omitempty"` + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } +func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoResponse) ProtoMessage() {} +func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{54} +} + +func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) +} +func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(m, src) +} +func (m *NodeGetInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoResponse.Size(m) +} +func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo + +func (m *NodeGetInfoResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if m != nil { + return m.MaxVolumesPerNode + } + return 0 +} + +func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type NodeExpandVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path on which volume is available. This field is REQUIRED. + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. 
+ // This field is OPTIONAL. + CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,4,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeExpandVolumeRequest) Reset() { *m = NodeExpandVolumeRequest{} } +func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeExpandVolumeRequest) ProtoMessage() {} +func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{55} +} + +func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b) +} +func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src) +} +func (m *NodeExpandVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeExpandVolumeRequest.Size(m) +} +func (m *NodeExpandVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeExpandVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeExpandVolumeRequest proto.InternalMessageInfo + +func (m *NodeExpandVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetVolumePath() string { + if m != nil { + return m.VolumePath + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +type NodeExpandVolumeResponse struct { + // The capacity of the volume in bytes. This field is OPTIONAL. 
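`NodeGetInfoResponse` above is what lets a CO place workloads: `node_id` keys later `ControllerPublishVolume` calls, `max_volumes_per_node` caps attachments, and `accessible_topology` constrains scheduling. A sketch of consuming it, assuming `Topology`'s `GetSegments` accessor (the `csi.v1.Topology` message with its `SegmentsEntry` map is registered below) and the same imports as the earlier sketches:

// describeNode prints the scheduling-relevant pieces of NodeGetInfo.
func describeNode(ctx context.Context, node csi.NodeClient) error {
	resp, err := node.NodeGetInfo(ctx, &csi.NodeGetInfoRequest{})
	if err != nil {
		return err
	}
	// A zero MaxVolumesPerNode means the CO decides the limit itself.
	fmt.Printf("node %q, volume limit %d\n", resp.GetNodeId(), resp.GetMaxVolumesPerNode())
	if topo := resp.GetAccessibleTopology(); topo != nil {
		for k, v := range topo.GetSegments() {
			fmt.Printf("  topology segment %s=%s\n", k, v) // e.g. "zone=Z2"
		}
	}
	return nil
}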
+ CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeExpandVolumeResponse) Reset() { *m = NodeExpandVolumeResponse{} } +func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeExpandVolumeResponse) ProtoMessage() {} +func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{56} +} + +func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b) +} +func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src) +} +func (m *NodeExpandVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeExpandVolumeResponse.Size(m) +} +func (m *NodeExpandVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeExpandVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeExpandVolumeResponse proto.InternalMessageInfo + +func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +var E_CsiSecret = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1059, + Name: "csi.v1.csi_secret", + Tag: "varint,1059,opt,name=csi_secret", + Filename: "github.com/container-storage-interface/spec/csi.proto", +} + +func init() { + proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value) + proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) + proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v1.PluginCapability.Service") + proto.RegisterType((*PluginCapability_VolumeExpansion)(nil), "csi.v1.PluginCapability.VolumeExpansion") + proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest") + 
proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry") + proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource") + proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource") + proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v1.Volume") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry") + proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement") + proto.RegisterType((*Topology)(nil), "csi.v1.Topology") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse") + 
proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v1.ControllerServiceCapability.RPC") + proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry") + proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse") + proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry") + proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse") + proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry") + proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse") + proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry") + proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerExpandVolumeRequest.SecretsEntry") + proto.RegisterType((*ControllerExpandVolumeResponse)(nil), "csi.v1.ControllerExpandVolumeResponse") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse") + 
proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest") + proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse") + proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC") + proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest") + proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse") + proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest") + proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse") + proto.RegisterExtension(E_CsiSecret) +} + +func init() { + proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01) +} + +var fileDescriptor_9cdb00adce470e01 = []byte{ + // 3366 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x70, 0xdb, 0xc6, + 0xf5, 0x27, 0xf8, 0x25, 0xea, 0xe9, 0xc3, 0xf4, 0xea, 0xc3, 0x34, 0x24, 0xd9, 0x32, 0x1c, 0x3b, + 0xb2, 0x63, 0xd3, 0xff, 0x28, 0x71, 0xe6, 0x1f, 0x5b, 0x69, 0x43, 0x51, 0xb4, 0xc4, 0x98, 0xa6, + 0x14, 0x90, 0x92, 0x63, 0xb7, 0x19, 0x04, 0x22, 0x57, 0x34, 0x26, 0x24, 0xc0, 0x00, 0xa0, 0x2a, + 0xf5, 0xd2, 0x99, 0xf6, 0xd4, 0x69, 0xef, 0x6d, 0x4f, 0x9d, 0x49, 0x6f, 0x6d, 0x33, 0x39, 0x75, + 0x7a, 0xec, 0x4c, 0x0f, 0x3d, 0xf4, 0xd0, 0xe9, 0xad, 0x9d, 0x5e, 0x72, 0xed, 0x64, 0xda, 0x99, + 0x4c, 0x8f, 0x9d, 0x1e, 0x3a, 0xc0, 0x2e, 0x40, 0x2c, 0x08, 0x80, 0xa4, 0x65, 0x4f, 0x0e, 0x3d, + 0x49, 0x7c, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, 0xef, 0xbd, 0x7d, 0xef, 0xb7, 0x80, 0xbb, 0x2d, 0xc5, + 0x7c, 0xd6, 0x3b, 0xcc, 0x37, 0xb4, 0xce, 0x9d, 0x86, 0xa6, 0x9a, 0xb2, 0xa2, 0x62, 0xfd, 0xb6, + 0x61, 0x6a, 0xba, 0xdc, 0xc2, 0xb7, 0x15, 0xd5, 0xc4, 0xfa, 0x91, 0xdc, 0xc0, 0x77, 0x8c, 0x2e, + 0x6e, 0xdc, 0x69, 0x18, 0x4a, 0xbe, 0xab, 0x6b, 0xa6, 0x86, 0xd2, 0xd6, 0xbf, 0xc7, 0xaf, 0xf3, + 0xab, 0x2d, 0x4d, 0x6b, 0xb5, 0xf1, 0x1d, 0x9b, 0x7a, 0xd8, 0x3b, 0xba, 0xd3, 0xc4, 0x46, 0x43, + 0x57, 0xba, 0xa6, 0xa6, 0x13, 0x4e, 0xfe, 0xb2, 0x9f, 0xc3, 0x54, 0x3a, 0xd8, 0x30, 0xe5, 0x4e, + 0x97, 0x32, 0x5c, 0xf2, 0x33, 0x7c, 0x47, 0x97, 0xbb, 0x5d, 0xac, 0x1b, 0x64, 0x5c, 0x58, 0x84, + 0xf9, 0x6d, 0x6c, 0xee, 0xb5, 0x7b, 0x2d, 0x45, 0x2d, 0xab, 0x47, 0x9a, 0x88, 0x3f, 0xe9, 0x61, + 0xc3, 0x14, 0xfe, 0xca, 0xc1, 0x82, 0x6f, 0xc0, 0xe8, 0x6a, 0xaa, 0x81, 0x11, 0x82, 0xa4, 0x2a, + 0x77, 0x70, 0x8e, 0x5b, 0xe5, 0xd6, 0x26, 0x45, 0xfb, 0x7f, 0x74, 0x0d, 0x66, 0x8f, 0xb1, 0xda, + 0xd4, 0x74, 0xe9, 0x18, 0xeb, 0x86, 0xa2, 0xa9, 0xb9, 0xb8, 0x3d, 0x3a, 0x43, 0xa8, 0x07, 0x84, + 0x88, 0xb6, 0x21, 0xd3, 0x91, 0x55, 0xe5, 0x08, 0x1b, 0x66, 0x2e, 0xb1, 0x9a, 0x58, 0x9b, 0x5a, + 0x7f, 0x2d, 0x4f, 0xb6, 0x9a, 0x0f, 0x5c, 0x2b, 0xff, 0x88, 0x72, 0x97, 0x54, 0x53, 0x3f, 0x15, + 0xdd, 0xc9, 0xfc, 0x7d, 0x98, 0x61, 0x86, 0x50, 0x16, 0x12, 0x1f, 0xe3, 0x53, 0xaa, 0x93, 0xf5, + 0x2f, 0x9a, 0x87, 0xd4, 0xb1, 0xdc, 0xee, 0x61, 0xaa, 0x09, 0xf9, 0x71, 0x2f, 0xfe, 0xff, 0x9c, + 0x70, 0x09, 0x96, 0xdd, 0xd5, 0x8a, 0x72, 0x57, 0x3e, 0x54, 0xda, 0x8a, 0xa9, 0x60, 0xc3, 0xd9, + 0xfa, 0x87, 0xb0, 0x12, 0x32, 0x4e, 0x2d, 0xb0, 0x01, 0xd3, 0x0d, 0x0f, 0x3d, 
0xc7, 0xd9, 0x5b, + 0xc9, 0x39, 0x5b, 0xf1, 0xcd, 0x3c, 0x15, 0x19, 0x6e, 0xe1, 0x4f, 0x09, 0xc8, 0xfa, 0x59, 0xd0, + 0x06, 0x4c, 0x18, 0x58, 0x3f, 0x56, 0x1a, 0xc4, 0xae, 0x53, 0xeb, 0xab, 0x61, 0xd2, 0xf2, 0x35, + 0xc2, 0xb7, 0x13, 0x13, 0x9d, 0x29, 0x68, 0x1f, 0xb2, 0xc7, 0x5a, 0xbb, 0xd7, 0xc1, 0x12, 0x3e, + 0xe9, 0xca, 0xaa, 0x7b, 0x00, 0x53, 0xeb, 0x6b, 0xa1, 0x62, 0x0e, 0xec, 0x09, 0x25, 0x87, 0x7f, + 0x27, 0x26, 0x9e, 0x3b, 0x66, 0x49, 0xfc, 0x4f, 0x38, 0x98, 0xa0, 0xab, 0xa1, 0xb7, 0x21, 0x69, + 0x9e, 0x76, 0x89, 0x76, 0xb3, 0xeb, 0xd7, 0x86, 0x69, 0x97, 0xaf, 0x9f, 0x76, 0xb1, 0x68, 0x4f, + 0x11, 0xde, 0x87, 0xa4, 0xf5, 0x0b, 0x4d, 0xc1, 0xc4, 0x7e, 0xf5, 0x61, 0x75, 0xf7, 0x71, 0x35, + 0x1b, 0x43, 0x8b, 0x80, 0x8a, 0xbb, 0xd5, 0xba, 0xb8, 0x5b, 0xa9, 0x94, 0x44, 0xa9, 0x56, 0x12, + 0x0f, 0xca, 0xc5, 0x52, 0x96, 0x43, 0xaf, 0xc0, 0xea, 0xc1, 0x6e, 0x65, 0xff, 0x51, 0x49, 0x2a, + 0x14, 0x8b, 0xa5, 0x5a, 0xad, 0xbc, 0x59, 0xae, 0x94, 0xeb, 0x4f, 0xa4, 0xe2, 0x6e, 0xb5, 0x56, + 0x17, 0x0b, 0xe5, 0x6a, 0xbd, 0x96, 0x8d, 0xf3, 0xdf, 0xe7, 0xe0, 0x9c, 0x6f, 0x03, 0xa8, 0xc0, + 0x68, 0x78, 0x7b, 0xd4, 0x8d, 0x7b, 0x35, 0xbd, 0x15, 0xa4, 0x29, 0x40, 0x7a, 0xb7, 0x5a, 0x29, + 0x57, 0x2d, 0xed, 0xa6, 0x60, 0x62, 0xf7, 0xc1, 0x03, 0xfb, 0x47, 0x7c, 0x33, 0x4d, 0x16, 0x14, + 0x66, 0x61, 0x7a, 0x4f, 0xd7, 0x0e, 0xb1, 0xe3, 0x3f, 0x05, 0x98, 0xa1, 0xbf, 0xa9, 0xbf, 0xfc, + 0x1f, 0xa4, 0x74, 0x2c, 0x37, 0x4f, 0xe9, 0xd1, 0xf2, 0x79, 0x12, 0x93, 0x79, 0x27, 0x26, 0xf3, + 0x9b, 0x9a, 0xd6, 0x3e, 0xb0, 0xfc, 0x53, 0x24, 0x8c, 0xc2, 0x57, 0x49, 0x98, 0x2b, 0xea, 0x58, + 0x36, 0x31, 0xd1, 0x96, 0x8a, 0x0e, 0x8c, 0xbd, 0x0d, 0x98, 0xb5, 0xfc, 0xab, 0xa1, 0x98, 0xa7, + 0x92, 0x2e, 0xab, 0x2d, 0x4c, 0x8f, 0x7e, 0xc1, 0xb1, 0x40, 0x91, 0x8e, 0x8a, 0xd6, 0xa0, 0x38, + 0xd3, 0xf0, 0xfe, 0x44, 0x65, 0x98, 0xa3, 0xae, 0xc3, 0xb8, 0x74, 0x82, 0x75, 0x69, 0xa2, 0x85, + 0xc7, 0xa5, 0xd1, 0x31, 0x4b, 0x51, 0xb0, 0x81, 0x1e, 0x02, 0x74, 0x65, 0x5d, 0xee, 0x60, 0x13, + 0xeb, 0x46, 0x2e, 0xc9, 0xc6, 0x77, 0xc0, 0x6e, 0xf2, 0x7b, 0x2e, 0x37, 0x89, 0x6f, 0xcf, 0x74, + 0xb4, 0x6d, 0x05, 0x44, 0x43, 0xc7, 0xa6, 0x91, 0x4b, 0xd9, 0x92, 0xd6, 0xa2, 0x24, 0xd5, 0x08, + 0xab, 0x2d, 0x66, 0x33, 0xf1, 0xd3, 0x4d, 0x4e, 0x74, 0x66, 0xa3, 0x5d, 0x58, 0x70, 0x36, 0xa8, + 0xa9, 0x26, 0x56, 0x4d, 0xc9, 0xd0, 0x7a, 0x7a, 0x03, 0xe7, 0xd2, 0xb6, 0x95, 0x96, 0x7c, 0x5b, + 0x24, 0x3c, 0x35, 0x9b, 0x45, 0xa4, 0xa6, 0x61, 0x88, 0xe8, 0x29, 0xf0, 0x72, 0xa3, 0x81, 0x0d, + 0x43, 0x21, 0xb6, 0x90, 0x74, 0xfc, 0x49, 0x4f, 0xd1, 0x71, 0x07, 0xab, 0xa6, 0x91, 0x9b, 0x60, + 0xa5, 0xd6, 0xb5, 0xae, 0xd6, 0xd6, 0x5a, 0xa7, 0x62, 0x9f, 0x47, 0xbc, 0xc8, 0x4c, 0xf7, 0x8c, + 0x18, 0xfc, 0x3b, 0x70, 0xce, 0x67, 0x94, 0x71, 0x32, 0x1b, 0x7f, 0x0f, 0xa6, 0xbd, 0x96, 0x18, + 0x2b, 0x2b, 0xfe, 0x28, 0x0e, 0x73, 0x01, 0x36, 0x40, 0x3b, 0x90, 0x31, 0x54, 0xb9, 0x6b, 0x3c, + 0xd3, 0x4c, 0xea, 0xbf, 0x37, 0x23, 0x4c, 0x96, 0xaf, 0x51, 0x5e, 0xf2, 0x73, 0x27, 0x26, 0xba, + 0xb3, 0xd1, 0x26, 0xa4, 0x89, 0x3d, 0xfd, 0xb9, 0x29, 0x48, 0x0e, 0xa1, 0xb9, 0x52, 0xe8, 0x4c, + 0xfe, 0x75, 0x98, 0x65, 0x57, 0x40, 0x97, 0x61, 0xca, 0x59, 0x41, 0x52, 0x9a, 0x74, 0xaf, 0xe0, + 0x90, 0xca, 0x4d, 0xfe, 0x35, 0x98, 0xf6, 0x0a, 0x43, 0x4b, 0x30, 0x49, 0x1d, 0xc2, 0x65, 0xcf, + 0x10, 0x42, 0xb9, 0xe9, 0xc6, 0xf4, 0x37, 0x60, 0x9e, 0xf5, 0x33, 0x1a, 0xca, 0xd7, 0xdd, 0x3d, + 0x10, 0x5b, 0xcc, 0xb2, 0x7b, 0x70, 0xf4, 0x14, 0x7e, 0x99, 0x84, 0xac, 0x3f, 0x68, 0xd0, 0x06, + 0xa4, 0x0e, 0xdb, 0x5a, 0xe3, 0x63, 0x3a, 0xf7, 0x95, 0xb0, 0xe8, 0xca, 0x6f, 0x5a, 0x5c, 0x84, + 0xba, 
0x13, 0x13, 0xc9, 0x24, 0x6b, 0x76, 0x47, 0xeb, 0xa9, 0x26, 0xb5, 0x5e, 0xf8, 0xec, 0x47, + 0x16, 0x57, 0x7f, 0xb6, 0x3d, 0x09, 0x6d, 0xc1, 0x14, 0x71, 0x3b, 0xa9, 0xa3, 0x35, 0x71, 0x2e, + 0x61, 0xcb, 0xb8, 0x1a, 0x2a, 0xa3, 0x60, 0xf3, 0x3e, 0xd2, 0x9a, 0x58, 0x04, 0xd9, 0xfd, 0x9f, + 0x9f, 0x81, 0x29, 0x8f, 0x6e, 0xfc, 0x36, 0x4c, 0x79, 0x16, 0x43, 0x17, 0x60, 0xe2, 0xc8, 0x90, + 0xdc, 0x24, 0x3c, 0x29, 0xa6, 0x8f, 0x0c, 0x3b, 0x9f, 0x5e, 0x86, 0x29, 0x5b, 0x0b, 0xe9, 0xa8, + 0x2d, 0xb7, 0x8c, 0x5c, 0x7c, 0x35, 0x61, 0x9d, 0x91, 0x4d, 0x7a, 0x60, 0x51, 0xf8, 0x7f, 0x70, + 0x00, 0xfd, 0x25, 0xd1, 0x06, 0x24, 0x6d, 0x2d, 0x49, 0x2a, 0x5f, 0x1b, 0x41, 0xcb, 0xbc, 0xad, + 0xaa, 0x3d, 0x4b, 0xf8, 0x39, 0x07, 0x49, 0x5b, 0x8c, 0xff, 0xc2, 0xa9, 0x95, 0xab, 0xdb, 0x95, + 0x92, 0x54, 0xdd, 0xdd, 0x2a, 0x49, 0x8f, 0xc5, 0x72, 0xbd, 0x24, 0x66, 0x39, 0xb4, 0x04, 0x17, + 0xbc, 0x74, 0xb1, 0x54, 0xd8, 0x2a, 0x89, 0xd2, 0x6e, 0xb5, 0xf2, 0x24, 0x1b, 0x47, 0x3c, 0x2c, + 0x3e, 0xda, 0xaf, 0xd4, 0xcb, 0x83, 0x63, 0x09, 0xb4, 0x0c, 0x39, 0xcf, 0x18, 0x95, 0x41, 0xc5, + 0x26, 0x2d, 0xb1, 0x9e, 0x51, 0xf2, 0x2f, 0x1d, 0x4c, 0x6d, 0xce, 0xb8, 0x87, 0x61, 0x3b, 0xdb, + 0x63, 0x98, 0x61, 0x72, 0xb4, 0x55, 0x4e, 0xd1, 0xa4, 0xd2, 0x94, 0x0e, 0x4f, 0x4d, 0xbb, 0xc4, + 0xe0, 0xd6, 0x12, 0xe2, 0x8c, 0x43, 0xdd, 0xb4, 0x88, 0x96, 0x59, 0xdb, 0x4a, 0x47, 0x31, 0x29, + 0x4f, 0xdc, 0xe6, 0x01, 0x9b, 0x64, 0x33, 0x08, 0x5f, 0xc4, 0x21, 0x4d, 0xcf, 0xe6, 0x9a, 0xe7, + 0x96, 0x60, 0x44, 0x3a, 0x54, 0x22, 0x92, 0x09, 0x8e, 0x38, 0x1b, 0x1c, 0x68, 0x07, 0x66, 0xbd, + 0xa9, 0xf4, 0xc4, 0x29, 0xe2, 0xae, 0xb0, 0x07, 0xe4, 0x8d, 0xe7, 0x13, 0x5a, 0xba, 0xcd, 0x1c, + 0x7b, 0x69, 0x68, 0x13, 0x66, 0x7d, 0xd9, 0x38, 0x39, 0x3c, 0x1b, 0xcf, 0x34, 0x98, 0xc4, 0x54, + 0x80, 0x39, 0x27, 0x91, 0xb6, 0xb1, 0x64, 0xd2, 0x44, 0x4b, 0x6f, 0x8b, 0xec, 0x40, 0x02, 0x46, + 0x7d, 0x66, 0x87, 0xc6, 0xbf, 0x0b, 0x68, 0x50, 0xd7, 0xb1, 0xb2, 0x66, 0x0f, 0xe6, 0x02, 0x52, + 0x3c, 0xca, 0xc3, 0xa4, 0x7d, 0x54, 0x86, 0x62, 0x62, 0x5a, 0x1e, 0x0e, 0x6a, 0xd4, 0x67, 0xb1, + 0xf8, 0xbb, 0x3a, 0x3e, 0xc2, 0xba, 0x8e, 0x9b, 0x76, 0x78, 0x04, 0xf2, 0xbb, 0x2c, 0xc2, 0x0f, + 0x38, 0xc8, 0x38, 0x74, 0x74, 0x0f, 0x32, 0x06, 0x6e, 0x91, 0xeb, 0x87, 0xac, 0x75, 0xc9, 0x3f, + 0x37, 0x5f, 0xa3, 0x0c, 0xb4, 0x90, 0x76, 0xf8, 0xad, 0x42, 0x9a, 0x19, 0x1a, 0x6b, 0xf3, 0xbf, + 0xe5, 0x60, 0x6e, 0x0b, 0xb7, 0xb1, 0xbf, 0x4a, 0x89, 0xca, 0xb0, 0xde, 0x8b, 0x3d, 0xce, 0x5e, + 0xec, 0x01, 0xa2, 0x22, 0x2e, 0xf6, 0x33, 0x5d, 0x76, 0x8b, 0x30, 0xcf, 0xae, 0x46, 0xd2, 0xbb, + 0xf0, 0xcf, 0x04, 0x5c, 0xb2, 0x7c, 0x41, 0xd7, 0xda, 0x6d, 0xac, 0xef, 0xf5, 0x0e, 0xdb, 0x8a, + 0xf1, 0x6c, 0x8c, 0xcd, 0x5d, 0x80, 0x09, 0x55, 0x6b, 0x7a, 0x82, 0x27, 0x6d, 0xfd, 0x2c, 0x37, + 0x51, 0x09, 0xce, 0xfb, 0xcb, 0xac, 0x53, 0x9a, 0x84, 0xc3, 0x8b, 0xac, 0xec, 0xb1, 0xff, 0x06, + 0xe1, 0x21, 0x63, 0x15, 0x88, 0x9a, 0xda, 0x3e, 0xb5, 0x23, 0x26, 0x23, 0xba, 0xbf, 0x91, 0xe8, + 0xaf, 0x98, 0xde, 0x70, 0x2b, 0xa6, 0xc8, 0x1d, 0x45, 0x15, 0x4f, 0x1f, 0x0d, 0x44, 0x7c, 0xda, + 0x16, 0xfd, 0xf6, 0x88, 0xa2, 0x87, 0x66, 0x82, 0xb3, 0x9c, 0xe2, 0x0b, 0x08, 0xdf, 0x3f, 0x72, + 0x70, 0x39, 0x74, 0x0b, 0xf4, 0xca, 0x6f, 0xc2, 0xb9, 0x2e, 0x19, 0x70, 0x8d, 0x40, 0xa2, 0xec, + 0xfe, 0x50, 0x23, 0xd0, 0x2e, 0x96, 0x52, 0x19, 0x33, 0xcc, 0x76, 0x19, 0x22, 0x5f, 0x80, 0xb9, + 0x00, 0xb6, 0xb1, 0x36, 0xf3, 0x25, 0x07, 0xab, 0x7d, 0x55, 0xf6, 0xd5, 0xee, 0x8b, 0x73, 0xdf, + 0x7a, 0xdf, 0xb7, 0x48, 0xca, 0xbf, 0x3b, 0xb8, 0xf7, 0xe0, 0x05, 0x5f, 0x56, 0x04, 0x5f, 0x85, + 0x2b, 0x11, 0x4b, 0xd3, 0x70, 
0xfe, 0x22, 0x09, 0x57, 0x0e, 0xe4, 0xb6, 0xd2, 0x74, 0x0b, 0xb9, + 0x80, 0x7e, 0x3f, 0xda, 0x24, 0x8d, 0x81, 0x08, 0x20, 0x59, 0x6b, 0xc3, 0x8d, 0xda, 0x61, 0xf2, + 0x47, 0xb8, 0x0e, 0x5f, 0x60, 0x13, 0xf6, 0x24, 0xa0, 0x09, 0x7b, 0x7b, 0x74, 0x5d, 0xa3, 0x5a, + 0xb2, 0x7d, 0x7f, 0x82, 0x79, 0x6b, 0x74, 0xb9, 0x11, 0x5e, 0x70, 0xe6, 0x28, 0xfe, 0x3a, 0xbb, + 0xa6, 0xdf, 0x27, 0x41, 0x88, 0xda, 0x3d, 0xcd, 0x21, 0x22, 0x4c, 0x36, 0x34, 0xf5, 0x48, 0xd1, + 0x3b, 0xb8, 0x49, 0xab, 0xff, 0x37, 0x47, 0x31, 0x1e, 0x4d, 0x20, 0x45, 0x67, 0xae, 0xd8, 0x17, + 0x83, 0x72, 0x30, 0xd1, 0xc1, 0x86, 0x21, 0xb7, 0x1c, 0xb5, 0x9c, 0x9f, 0xfc, 0x67, 0x09, 0x98, + 0x74, 0xa7, 0x20, 0x75, 0xc0, 0x83, 0x49, 0xfa, 0xda, 0x7e, 0x1e, 0x05, 0x9e, 0xdf, 0x99, 0xe3, + 0xcf, 0xe1, 0xcc, 0x4d, 0xc6, 0x99, 0x49, 0x38, 0x6c, 0x3d, 0x97, 0xda, 0x11, 0x7e, 0xfd, 0xb5, + 0x3b, 0xa0, 0xf0, 0x6d, 0x40, 0x15, 0xc5, 0xa0, 0x5d, 0x94, 0x9b, 0x96, 0xac, 0xa6, 0x49, 0x3e, + 0x91, 0xb0, 0x6a, 0xea, 0x0a, 0x2d, 0xd7, 0x53, 0x22, 0x74, 0xe4, 0x93, 0x12, 0xa1, 0x58, 0x25, + 0xbd, 0x61, 0xca, 0xba, 0xa9, 0xa8, 0x2d, 0xc9, 0xd4, 0x3e, 0xc6, 0x2e, 0xe8, 0xea, 0x50, 0xeb, + 0x16, 0x51, 0xf8, 0x34, 0x0e, 0x73, 0x8c, 0x78, 0xea, 0x93, 0xf7, 0x61, 0xa2, 0x2f, 0x9b, 0x29, + 0xe3, 0x03, 0xb8, 0xf3, 0xc4, 0x6c, 0xce, 0x0c, 0xb4, 0x02, 0xa0, 0xe2, 0x13, 0x93, 0x59, 0x77, + 0xd2, 0xa2, 0xd8, 0x6b, 0xf2, 0x1b, 0x6e, 0xcf, 0x6d, 0xca, 0x66, 0xcf, 0x40, 0xb7, 0x00, 0xd1, + 0x0c, 0x8d, 0x9b, 0x12, 0xbd, 0x62, 0xc8, 0xb2, 0x93, 0x62, 0xd6, 0x1d, 0xa9, 0xda, 0x97, 0x8d, + 0xc1, 0x7f, 0x02, 0x29, 0x62, 0xc4, 0x11, 0xbb, 0x6d, 0xf4, 0x2e, 0xa4, 0x0d, 0x7b, 0x21, 0x3f, + 0xb2, 0x10, 0xb4, 0x13, 0xaf, 0x62, 0x22, 0x9d, 0x27, 0x7c, 0x16, 0x07, 0xb4, 0x8d, 0x4d, 0xb7, + 0x0d, 0xa3, 0x67, 0x10, 0xe2, 0xcb, 0xdc, 0x73, 0xf8, 0xf2, 0x7b, 0x8c, 0x2f, 0x93, 0x68, 0xb8, + 0xe9, 0x41, 0xbf, 0x7d, 0x4b, 0x47, 0x66, 0xe2, 0x90, 0xd6, 0x87, 0xd4, 0x93, 0xa3, 0xb5, 0x3e, + 0x67, 0x74, 0xd9, 0x2d, 0x98, 0x63, 0x74, 0xa6, 0x3e, 0x75, 0x1b, 0x90, 0x7c, 0x2c, 0x2b, 0x6d, + 0xd9, 0xd2, 0xcb, 0xe9, 0x2c, 0x69, 0xa7, 0x79, 0xde, 0x1d, 0x71, 0xa6, 0x09, 0x82, 0xb7, 0x60, + 0xa1, 0xf2, 0xfc, 0x68, 0x7c, 0xdb, 0x7b, 0xd1, 0x0f, 0xf0, 0xd0, 0x75, 0xb7, 0x03, 0x11, 0xf9, + 0xab, 0x83, 0x45, 0x0a, 0x85, 0xa7, 0x43, 0xc1, 0xf9, 0x5f, 0x25, 0x60, 0x29, 0x82, 0x1b, 0xdd, + 0x87, 0x84, 0xde, 0x6d, 0x50, 0x77, 0x7c, 0x75, 0x04, 0xf9, 0x79, 0x71, 0xaf, 0xb8, 0x13, 0x13, + 0xad, 0x59, 0xfc, 0x1f, 0xe2, 0x90, 0x10, 0xf7, 0x8a, 0xe8, 0x5d, 0x06, 0xa9, 0xbe, 0x35, 0xa2, + 0x14, 0x2f, 0x50, 0xfd, 0x1f, 0x2e, 0x08, 0xa9, 0xce, 0xc1, 0x7c, 0x51, 0x2c, 0x15, 0xea, 0x25, + 0x69, 0xab, 0x54, 0x29, 0xd5, 0x4b, 0x12, 0x41, 0xd2, 0xb3, 0x1c, 0x5a, 0x86, 0xdc, 0xde, 0xfe, + 0x66, 0xa5, 0x5c, 0xdb, 0x91, 0xf6, 0xab, 0xce, 0x7f, 0x74, 0x34, 0x8e, 0xb2, 0x30, 0x5d, 0x29, + 0xd7, 0xea, 0x94, 0x50, 0xcb, 0x26, 0x2c, 0xca, 0x76, 0xa9, 0x2e, 0x15, 0x0b, 0x7b, 0x85, 0x62, + 0xb9, 0xfe, 0x24, 0x9b, 0x44, 0x3c, 0x2c, 0xb2, 0xb2, 0x6b, 0xd5, 0xc2, 0x5e, 0x6d, 0x67, 0xb7, + 0x9e, 0x4d, 0x21, 0x04, 0xb3, 0xf6, 0x7c, 0x87, 0x54, 0xcb, 0xa6, 0x2d, 0x09, 0xc5, 0xca, 0x6e, + 0xd5, 0xd5, 0x61, 0x02, 0xcd, 0x43, 0xd6, 0x59, 0x59, 0x2c, 0x15, 0xb6, 0x6c, 0x14, 0x25, 0x83, + 0xce, 0xc3, 0x4c, 0xe9, 0x83, 0xbd, 0x42, 0x75, 0xcb, 0x61, 0x9c, 0x44, 0xab, 0xb0, 0xec, 0x55, + 0x47, 0xa2, 0xb3, 0x4a, 0x5b, 0x36, 0x96, 0x52, 0xcb, 0x82, 0x8b, 0xd2, 0x7d, 0x19, 0x87, 0x05, + 0x02, 0xd3, 0x39, 0xa0, 0xa0, 0x13, 0xb8, 0x6b, 0x90, 0x25, 0xc0, 0x82, 0xe4, 0x2f, 0xed, 0x66, + 0x09, 0xfd, 0xc0, 0x29, 0xf0, 0x1c, 0x48, 0x3d, 0xee, 
0x81, 0xd4, 0xcb, 0xfe, 0x72, 0xf7, 0x26, + 0x0b, 0x3e, 0xfb, 0x56, 0x8b, 0xea, 0xa0, 0x1e, 0x05, 0xd4, 0x63, 0xb7, 0xa3, 0xa5, 0x45, 0xdd, + 0x55, 0x67, 0x69, 0x97, 0xce, 0x18, 0xf2, 0x0f, 0x60, 0xd1, 0xaf, 0x2f, 0x8d, 0xbe, 0x5b, 0x03, + 0x10, 0xb1, 0x9b, 0x83, 0x5c, 0x5e, 0x97, 0x43, 0xf8, 0x0b, 0x07, 0x19, 0x87, 0x6c, 0xdd, 0x23, + 0x86, 0xf2, 0x5d, 0xcc, 0x40, 0x52, 0x93, 0x16, 0xc5, 0x45, 0xb8, 0xbc, 0xe0, 0x6e, 0xdc, 0x0f, + 0xee, 0x06, 0x9e, 0x73, 0x22, 0xf0, 0x9c, 0xbf, 0x09, 0x33, 0x0d, 0x4b, 0x7d, 0x45, 0x53, 0x25, + 0x53, 0xe9, 0x38, 0x88, 0xd3, 0xe0, 0x63, 0x4c, 0xdd, 0x79, 0x41, 0x15, 0xa7, 0x9d, 0x09, 0x16, + 0x09, 0xad, 0xc2, 0xb4, 0xfd, 0x38, 0x23, 0x99, 0x9a, 0xd4, 0x33, 0x70, 0x2e, 0x65, 0xf7, 0xdf, + 0x60, 0xd3, 0xea, 0xda, 0xbe, 0x81, 0x85, 0xdf, 0x71, 0xb0, 0x40, 0x60, 0x05, 0xbf, 0x3b, 0x0e, + 0x03, 0xa9, 0xbd, 0x1e, 0xe7, 0xbb, 0x1a, 0x02, 0x05, 0xbe, 0xac, 0xae, 0x2a, 0x07, 0x8b, 0xfe, + 0xf5, 0x68, 0x2b, 0xf5, 0x79, 0x1c, 0xe6, 0xad, 0xdb, 0xd4, 0x19, 0x78, 0xd1, 0x65, 0xca, 0x18, + 0x27, 0xe9, 0x33, 0x66, 0x72, 0xc0, 0x98, 0x3b, 0xfe, 0x46, 0xe5, 0x86, 0xb7, 0x1e, 0xf0, 0xef, + 0xe0, 0x65, 0xd9, 0xf2, 0xd7, 0x1c, 0x2c, 0xf8, 0xd6, 0xa3, 0xf1, 0xf2, 0x8e, 0xbf, 0xf2, 0xba, + 0x1a, 0xa2, 0xdf, 0x73, 0xd5, 0x5e, 0x77, 0x9d, 0xea, 0x69, 0xbc, 0xb0, 0xfc, 0x73, 0x1c, 0x56, + 0xfa, 0x37, 0x90, 0xfd, 0x3c, 0xda, 0x1c, 0x03, 0x3a, 0x38, 0xdb, 0x2b, 0xe4, 0xfb, 0xfe, 0x84, + 0xbb, 0x3e, 0x78, 0x29, 0x06, 0xa8, 0x14, 0x95, 0x78, 0x03, 0x11, 0xb7, 0xe4, 0xb8, 0x88, 0xdb, + 0x99, 0x3c, 0xe0, 0x7b, 0x5e, 0x30, 0x91, 0x55, 0x9f, 0x7a, 0xc2, 0x88, 0xa8, 0xfc, 0x5b, 0x70, + 0xc1, 0x2e, 0x9a, 0xdd, 0xd7, 0x7d, 0xe7, 0xcd, 0x91, 0xa4, 0xc4, 0x8c, 0xb8, 0x60, 0x0d, 0xbb, + 0x4f, 0xda, 0x14, 0x89, 0x6e, 0x0a, 0x5f, 0x25, 0x61, 0xd1, 0x2a, 0xaa, 0x6b, 0xa6, 0xdc, 0x1a, + 0x07, 0xa3, 0xfd, 0xd6, 0x20, 0xe4, 0x15, 0x67, 0x8f, 0x25, 0x58, 0xea, 0x28, 0x48, 0x17, 0xca, + 0xc3, 0x9c, 0x61, 0xca, 0x2d, 0x3b, 0x1d, 0xc8, 0x7a, 0x0b, 0x9b, 0x52, 0x57, 0x36, 0x9f, 0xd1, + 0x58, 0x3f, 0x4f, 0x87, 0xea, 0xf6, 0xc8, 0x9e, 0x6c, 0x3e, 0x7b, 0x41, 0x07, 0x89, 0xde, 0xf3, + 0x27, 0x85, 0xd7, 0x86, 0xec, 0x25, 0xc2, 0xb7, 0x3e, 0x08, 0x81, 0x45, 0x5f, 0x1f, 0x22, 0x72, + 0x38, 0x1c, 0x7a, 0x76, 0x18, 0xf0, 0x6b, 0x46, 0x54, 0x2f, 0xc2, 0x85, 0x81, 0xcd, 0xd3, 0x2b, + 0xa4, 0x05, 0x39, 0x6b, 0x68, 0x5f, 0x35, 0xc6, 0x74, 0xc7, 0x10, 0x8f, 0x89, 0x87, 0x78, 0x8c, + 0xb0, 0x04, 0x17, 0x03, 0x16, 0xa2, 0x5a, 0xfc, 0x26, 0x45, 0xd4, 0x18, 0x1f, 0xdc, 0xff, 0x30, + 0x2c, 0x2a, 0xde, 0xf4, 0x1e, 0x7b, 0x20, 0x0e, 0xfe, 0x32, 0xe2, 0xe2, 0x32, 0x4c, 0x79, 0xf9, + 0xe8, 0x35, 0x68, 0x0e, 0x09, 0x9c, 0xd4, 0x99, 0xde, 0x1c, 0xd2, 0xbe, 0x37, 0x87, 0x4a, 0x3f, + 0xa8, 0x26, 0xd8, 0xd2, 0x36, 0xd4, 0x14, 0x11, 0x61, 0xf5, 0x74, 0x20, 0xac, 0x32, 0xec, 0x43, + 0x46, 0xa8, 0xd0, 0xff, 0x81, 0xc0, 0xa2, 0x4e, 0x1d, 0xf8, 0xc2, 0x20, 0x3c, 0x05, 0x9e, 0x78, + 0xfc, 0xf8, 0x98, 0xbf, 0xcf, 0x8d, 0xe2, 0x7e, 0x37, 0x12, 0x56, 0x60, 0x29, 0x50, 0x36, 0x5d, + 0xfa, 0x87, 0x1c, 0x51, 0x6c, 0x1b, 0x9b, 0x7d, 0x64, 0xc5, 0x18, 0x75, 0x69, 0x3a, 0xe8, 0x5d, + 0x9a, 0x90, 0x6c, 0x0f, 0x1e, 0x33, 0x24, 0x84, 0x6d, 0x62, 0x06, 0xbf, 0x2a, 0xf4, 0xb2, 0xbd, + 0x01, 0xa9, 0x9e, 0x0d, 0x97, 0x92, 0xa2, 0x6b, 0x8e, 0x8d, 0x81, 0x7d, 0x6b, 0x48, 0x24, 0x1c, + 0xc2, 0xe7, 0x1c, 0x4c, 0x79, 0xc8, 0x68, 0x19, 0x26, 0x5d, 0xf4, 0xc2, 0xe9, 0x52, 0x5c, 0x82, + 0x75, 0x68, 0xa6, 0x66, 0xca, 0x6d, 0xfa, 0x02, 0x4f, 0x7e, 0x58, 0x8d, 0x65, 0xcf, 0xc0, 0xa4, + 0x88, 0x4d, 0x88, 0xf6, 0xff, 0xe8, 0x16, 0x24, 0x7b, 0xaa, 0x62, 0xda, 0xc1, 
0x3a, 0xeb, 0x8f, + 0x42, 0x7b, 0xa9, 0xfc, 0xbe, 0xaa, 0x98, 0xa2, 0xcd, 0x25, 0xdc, 0x84, 0xa4, 0xf5, 0x8b, 0x6d, + 0xf2, 0x27, 0x21, 0xb5, 0xf9, 0xa4, 0x5e, 0xaa, 0x65, 0x39, 0x04, 0x90, 0x2e, 0x93, 0x96, 0x38, + 0x2e, 0x2c, 0xbb, 0x5b, 0x0f, 0x02, 0x51, 0x3e, 0x22, 0x67, 0x18, 0x06, 0x9f, 0x14, 0x02, 0xe1, + 0x93, 0x15, 0xe6, 0x36, 0x1b, 0x02, 0x9c, 0xfc, 0x8b, 0x83, 0x85, 0x40, 0x3e, 0x74, 0xd7, 0x0b, + 0x99, 0x5c, 0x89, 0x94, 0xe9, 0x05, 0x4b, 0x7e, 0xc6, 0x11, 0xb0, 0xe4, 0x1e, 0x03, 0x96, 0x5c, + 0x1f, 0x3a, 0xdf, 0x0b, 0x93, 0x1c, 0x84, 0xa0, 0x24, 0xb5, 0x7a, 0x61, 0xbb, 0x24, 0xed, 0x57, + 0xc9, 0x5f, 0x17, 0x25, 0x99, 0x87, 0xec, 0x76, 0xc9, 0xc1, 0x1d, 0xa4, 0x5a, 0xbd, 0x50, 0xaf, + 0x65, 0xe3, 0x83, 0x08, 0x45, 0xc2, 0xc5, 0x1f, 0xe6, 0x01, 0x51, 0xb3, 0x7a, 0x3f, 0x9d, 0xfd, + 0x94, 0x83, 0x39, 0x86, 0x4c, 0xad, 0xec, 0x79, 0x5d, 0xe3, 0x98, 0xd7, 0xb5, 0x3b, 0x30, 0x6f, + 0xb5, 0x50, 0xc4, 0xf1, 0x0d, 0xa9, 0x8b, 0x75, 0x1b, 0x23, 0xa5, 0xee, 0x74, 0xbe, 0x23, 0x9f, + 0x50, 0x0c, 0x73, 0x0f, 0xeb, 0x96, 0xe0, 0x17, 0x80, 0xff, 0x09, 0x3f, 0x8e, 0x93, 0x8b, 0x7a, + 0xec, 0x42, 0x7f, 0x68, 0xd0, 0x0e, 0x76, 0x02, 0x89, 0x31, 0x3a, 0x81, 0x90, 0x90, 0x4f, 0x8e, + 0x55, 0x1d, 0x8e, 0x7d, 0xc9, 0x09, 0x05, 0x52, 0x14, 0x9c, 0xa1, 0x48, 0x5f, 0xff, 0x37, 0x07, + 0x99, 0x72, 0x13, 0xab, 0xa6, 0xe5, 0xf4, 0x55, 0x98, 0x61, 0xbe, 0x68, 0x46, 0xcb, 0x21, 0x1f, + 0x3a, 0xdb, 0x16, 0xe7, 0x57, 0x22, 0x3f, 0x83, 0x16, 0x62, 0xe8, 0xc8, 0xf3, 0x35, 0x36, 0x03, + 0x2b, 0xbf, 0x32, 0x30, 0x33, 0x20, 0xfe, 0xf9, 0x6b, 0x43, 0xb8, 0xdc, 0x75, 0xde, 0x82, 0x94, + 0xfd, 0xed, 0x2a, 0x9a, 0x77, 0xbf, 0x9f, 0xf5, 0x7c, 0xda, 0xca, 0x2f, 0xf8, 0xa8, 0xce, 0xbc, + 0xf5, 0xbf, 0x67, 0x00, 0xfa, 0xbd, 0x0e, 0x7a, 0x08, 0xd3, 0xde, 0xcf, 0xe7, 0xd0, 0x52, 0xc4, + 0xc7, 0x9b, 0xfc, 0x72, 0xf0, 0xa0, 0xab, 0xd3, 0x43, 0x98, 0xf6, 0x7e, 0xac, 0xd1, 0x17, 0x16, + 0xf0, 0xc1, 0x48, 0x5f, 0x58, 0xe0, 0xf7, 0x1d, 0x31, 0xd4, 0x86, 0x0b, 0x21, 0xcf, 0xf5, 0xe8, + 0xfa, 0x68, 0x1f, 0x35, 0xf0, 0xaf, 0x8e, 0xf8, 0xee, 0x2f, 0xc4, 0x90, 0x0e, 0x17, 0x43, 0x5f, + 0xa9, 0xd1, 0xda, 0xa8, 0x6f, 0xe8, 0xfc, 0x8d, 0x11, 0x38, 0xdd, 0x35, 0x7b, 0xc0, 0x87, 0x3f, + 0x8d, 0xa1, 0x1b, 0x23, 0xbf, 0xd9, 0xf2, 0x37, 0x47, 0x7f, 0x69, 0x13, 0x62, 0x68, 0x07, 0xa6, + 0x3c, 0xaf, 0x2d, 0x88, 0x0f, 0x7c, 0x82, 0x21, 0x82, 0x97, 0x22, 0x9e, 0x67, 0x88, 0x24, 0xcf, + 0xdb, 0x42, 0x5f, 0xd2, 0xe0, 0x23, 0x49, 0x5f, 0x52, 0xc0, 0x63, 0x84, 0xdf, 0xfc, 0xbe, 0xcb, + 0x2f, 0xc8, 0xfc, 0xc1, 0xb7, 0x67, 0x90, 0xf9, 0x43, 0x6e, 0x52, 0x21, 0x86, 0xde, 0x87, 0x59, + 0x16, 0x26, 0x45, 0x2b, 0x91, 0x70, 0x2f, 0x7f, 0x29, 0x6c, 0xd8, 0x2b, 0x92, 0x45, 0xe5, 0xfa, + 0x22, 0x03, 0xd1, 0xc1, 0xbe, 0xc8, 0x10, 0x30, 0x2f, 0x66, 0xe5, 0x27, 0x06, 0x6b, 0xea, 0xe7, + 0xa7, 0x20, 0x88, 0xac, 0x9f, 0x9f, 0x02, 0x01, 0x2a, 0x21, 0x86, 0x14, 0x58, 0x0c, 0x86, 0x3a, + 0xd0, 0xb5, 0x91, 0x90, 0x1c, 0xfe, 0xfa, 0x30, 0x36, 0x37, 0xd5, 0xfc, 0x2d, 0x05, 0x49, 0xfb, + 0x16, 0xac, 0xc3, 0x39, 0x5f, 0xab, 0x89, 0x2e, 0x45, 0x37, 0xe0, 0xfc, 0xe5, 0xd0, 0x71, 0x77, + 0x27, 0x4f, 0xe1, 0xfc, 0x40, 0xf3, 0x88, 0x56, 0xbd, 0xf3, 0x82, 0x1a, 0x58, 0xfe, 0x4a, 0x04, + 0x87, 0x5f, 0x36, 0x9b, 0x76, 0x56, 0x87, 0x75, 0x37, 0xac, 0xec, 0xb0, 0x54, 0xf3, 0x11, 0x29, + 0x3a, 0xfc, 0x49, 0x46, 0x60, 0xf5, 0x0a, 0x4c, 0x2f, 0x57, 0x23, 0x79, 0xdc, 0x15, 0x3e, 0x74, + 0xab, 0x1d, 0x4f, 0x75, 0x8d, 0x18, 0xe5, 0x02, 0x9b, 0x00, 0x5e, 0x88, 0x62, 0x71, 0xc5, 0x3f, + 0x86, 0xac, 0xff, 0x0a, 0x46, 0xcc, 0x79, 0x05, 0xb9, 0xcd, 0x6a, 0x38, 0x83, 0xdf, 0x32, 0xfe, + 0xf8, 
0xf7, 0x6b, 0x15, 0x14, 0xf9, 0x57, 0x23, 0x79, 0xbc, 0x19, 0xcb, 0x53, 0xf0, 0xf5, 0x33, + 0xd6, 0x60, 0x71, 0xd8, 0xcf, 0x58, 0x01, 0x15, 0xa2, 0x10, 0xbb, 0xf7, 0x0e, 0x40, 0xc3, 0x50, + 0x24, 0xd2, 0x11, 0xa3, 0x95, 0x81, 0xc7, 0x89, 0x07, 0x0a, 0x6e, 0x37, 0x77, 0xbb, 0xa6, 0xa2, + 0xa9, 0x46, 0xee, 0x17, 0x19, 0xbb, 0x1d, 0x9f, 0x6c, 0x18, 0x0a, 0x69, 0x4c, 0x37, 0x53, 0x4f, + 0x13, 0x0d, 0x43, 0x39, 0x4c, 0xdb, 0xfc, 0x6f, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, + 0xfe, 0x46, 0x7f, 0x36, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IdentityClient is the client API for Identity service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IdentityServer is the server API for Identity service. +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +// UnimplementedIdentityServer can be embedded to have forward compatible implementations. 
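// Editorial note (not generated output): embedding the struct below is the
// forward-compatibility mechanism; a concrete server that embeds it keeps
// compiling when future csi.proto revisions add RPCs, with the embedded
// stubs answering codes.Unimplemented until a method is overridden. A
// minimal, hypothetical example:
//
//	type myIdentityServer struct {
//		UnimplementedIdentityServer // embed for forward compatibility
//	}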
+type UnimplementedIdentityServer struct { +} + +func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented") +} +func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented") +} +func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented") +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) { + out := new(ControllerExpandVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerExpandVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. 
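// Editorial sketch (not part of the generated file; myControllerServer is
// hypothetical): a plugin implements this interface and registers it on a
// grpc.Server listening on the CSI endpoint, e.g.
//
//	s := grpc.NewServer()
//	RegisterControllerServer(s, &myControllerServer{})
//	lis, err := net.Listen("unix", "/csi/csi.sock")
//	if err != nil {
//		// handle error
//	}
//	_ = s.Serve(lis)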
+type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) +} + +// UnimplementedControllerServer can be embedded to have forward compatible implementations. +type UnimplementedControllerServer struct { +} + +func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented") +} +func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented") +} +func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented") +} +func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented") +} +func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented") +} +func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented") +} +func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented") +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerExpandVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + { + MethodName: "ControllerExpandVolume", + Handler: _Controller_ControllerExpandVolume_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// NodeClient is the client API for Node service. 
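// Editorial note (not generated output): per the CSI spec, when a node
// plugin advertises the STAGE_UNSTAGE_VOLUME capability, callers invoke
// NodeStageVolume once per volume per node before any NodePublishVolume,
// and tear down in the reverse order (NodeUnpublishVolume, then
// NodeUnstageVolume).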
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) { + out := new(NodeGetVolumeStatsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) { + out := new(NodeExpandVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeExpandVolume", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServer is the server API for Node service. +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +} + +// UnimplementedNodeServer can be embedded to have forward compatible implementations. +type UnimplementedNodeServer struct { +} + +func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented") +} +func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented") +} +func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented") +} +func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented") +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetVolumeStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetVolumeStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetVolumeStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeExpandVolume_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeExpandVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetVolumeStats", + Handler: _Node_NodeGetVolumeStats_Handler, + }, + { + MethodName: "NodeExpandVolume", + Handler: _Node_NodeExpandVolume_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE new file mode 100644 index 000000000..b2b065037 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go new file mode 100644 index 000000000..ad35f09a8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go @@ -0,0 +1,44 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return waitBetween + } +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(waitBetween, jitterFraction) + } +} + +// BackoffExponential produces increasing intervals for each attempt. +// +// The scalar is multiplied by 2 raised to the current attempt. So the first +// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. +func BackoffExponential(scalar time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) + } +} + +// BackoffExponentialWithJitter creates an exponential backoff like +// BackoffExponential does, but adds jitter. +func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go new file mode 100644 index 000000000..afd924a14 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -0,0 +1,25 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_retry` provides client-side request retry logic for gRPC. + +Client-Side Request Retry Interceptor + +It allows for automatic retry, inside the generated gRPC code, of requests based on the gRPC status +of the reply. It supports unary (1:1) and server stream (1:n) requests. + +By default the interceptors *are disabled*, preventing accidental use of retries. You can easily +override the number of retries (setting them to more than 0) with a `grpc.CallOption`, e.g.: + + myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) + +Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms +linear backoff with 10% jitter. + +For chained interceptors, the retry interceptor will call every interceptor that follows it +whenever a retry happens. + +Please see examples for more advanced use.
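The "examples" referenced above live upstream and are not vendored with this change, so here is a minimal wiring sketch. The dial target, the plaintext dial option, and the backoff values are illustrative assumptions, not part of this diff:

	package main

	import (
		"time"

		grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
		"google.golang.org/grpc"
	)

	func dialWithRetries(addr string) (*grpc.ClientConn, error) {
		// Allow up to 3 attempts per call on the default retriable codes
		// (ResourceExhausted, Unavailable), backing off exponentially from
		// a 100ms base with 10% jitter between attempts.
		return grpc.Dial(addr,
			grpc.WithInsecure(), // plaintext: an assumption for this sketch only
			grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(
				grpc_retry.WithMax(3),
				grpc_retry.WithBackoff(grpc_retry.BackoffExponentialWithJitter(100*time.Millisecond, 0.10)),
			)),
			grpc.WithStreamInterceptor(grpc_retry.StreamClientInterceptor(
				grpc_retry.WithMax(3),
			)),
		)
	}

Per-call overrides ride along as ordinary grpc.CallOptions, e.g. `myclient.Ping(ctx, req, grpc_retry.WithPerRetryTimeout(time.Second))`, since the package's CallOption embeds grpc.EmptyCallOption (see options.go below).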
+*/ +package grpc_retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go new file mode 100644 index 000000000..7a633e293 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go @@ -0,0 +1,142 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // DefaultRetriableCodes is a set of well-known gRPC codes that should be retriable. + // + // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, has been reached. + // `Unavailable` means that the system is currently unavailable and the client should retry again. + DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable} + + defaultOptions = &options{ + max: 0, // disabled + perCallTimeout: 0, // disabled + includeHeader: true, + codes: DefaultRetriableCodes, + backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + }), + } +) + +// BackoffFunc denotes a family of functions that control the backoff duration between call retries. + +// They are called with an identifier of the attempt, and should return the time the client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request, +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type BackoffFunc func(attempt uint) time.Duration + +// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return the time the client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request, +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. The context can be used to extract request scoped metadata and context values. +type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration + +// Disable disables the retry behaviour on this call, or this interceptor. +// +// It's semantically the same as `WithMax(0)`. +func Disable() CallOption { + return WithMax(0) +} + +// WithMax sets the maximum number of retries on this call, or this interceptor. +func WithMax(maxRetries uint) CallOption { + return CallOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc` used to control time between retries. +func WithBackoff(bf BackoffFunc) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return bf(attempt) + }) + }} +} + +// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. +func WithBackoffContext(bf BackoffFuncContext) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +// WithCodes sets which codes should be retried. +// +// Please *use with care*, as you may be retrying non-idempotent calls. +// +// You cannot automatically retry on Cancelled and Deadline; use `WithPerRetryTimeout` for those.
+func WithCodes(retryCodes ...codes.Code) CallOption { + return CallOption{applyFunc: func(o *options) { + o.codes = retryCodes + }} +} + +// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor. +// +// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation +// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call. +// +// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Second), each +// of the retry calls (including the initial one) will have a deadline of now + 3s. +// +// A value of 0 disables the per-retry timeout override completely, and each retry call uses the +// parent `context.Deadline`. +// +// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried. +func WithPerRetryTimeout(timeout time.Duration) CallOption { + return CallOption{applyFunc: func(o *options) { + o.perCallTimeout = timeout + }} +} + +type options struct { + max uint + perCallTimeout time.Duration + includeHeader bool + codes []codes.Code + backoffFunc BackoffFuncContext +} + +// CallOption is a grpc.CallOption that is local to grpc_retry. +type CallOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() methods so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options { + if len(callOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range callOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) { + for _, opt := range callOptions { + if co, ok := opt.(CallOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go new file mode 100644 index 000000000..6793f17e6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -0,0 +1,323 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + AttemptMetadataKey = "x-retry-attempty" +) + +// UnaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short-circuit for simplicity, and to avoid allocations.
+ if callOpts.max == 0 { + return invoker(parentCtx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return err + } + callCtx := perCallContext(parentCtx, callOpts, attempt) + lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + if lastErr == nil { + return nil + } + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // it's the parent context deadline or cancellation. + return lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short-circuit for simplicity, and to avoid allocations. + if callOpts.max == 0 { + return streamer(parentCtx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + } + + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return nil, err + } + callCtx := perCallContext(parentCtx, callOpts, 0) + + var newStreamer grpc.ClientStream + newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + if lastErr == nil { + retryingStreamer := &serverStreamingRetryingStream{ + ClientStream: newStreamer, + callOpts: callOpts, + parentCtx: parentCtx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } + + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // it's the parent context deadline or cancellation.
+ return nil, lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return nil, lastErr + } + } + return nil, lastErr + } +} + +// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. +type serverStreamingRetryingStream struct { + grpc.ClientStream + bufferedSends []interface{} // messages sent by the client, buffered so they can be resent on retry + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was called + parentCtx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + // We start off from attempt 1, because the zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { + return err + } + callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) + newStream, err := s.reestablishStreamAndResendBuffer(callCtx) + if err != nil { + // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ return err + } + s.setStream(newStream) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.parentCtx.Err() != nil { + logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) + return false, err + } else if s.callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(s.parentCtx, "grpc_retry context error from retry call") + return true, err + } + } + return isRetriable(err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + logTrace(callCtx, "grpc_retry failed resending message: %v", err) + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { + var waitTime time.Duration = 0 + if attempt > 0 { + waitTime = callOpts.backoffFunc(parentCtx, attempt) + } + if waitTime > 0 { + logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) + timer := time.NewTimer(waitTime) + select { + case <-parentCtx.Done(): + timer.Stop() + return contextErrToGrpcErr(parentCtx.Err()) + case <-timer.C: + } + } + return nil +} + +func isRetriable(err error, callOpts *options) bool { + errCode := status.Code(err) + if isContextError(err) { + // context errors are not retriable based on user settings. 
+ return false + } + for _, code := range callOpts.codes { + if code == errCode { + return true + } + } + return false +} + +func isContextError(err error) bool { + code := status.Code(err) + return code == codes.DeadlineExceeded || code == codes.Canceled +} + +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { + ctx := parentCtx + if callOpts.perCallTimeout != 0 { + ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + } + if attempt > 0 && callOpts.includeHeader { + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Errorf(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Errorf(codes.Canceled, err.Error()) + default: + return status.Errorf(codes.Unknown, err.Error()) + } +} + +func logTrace(ctx context.Context, format string, a ...interface{}) { + tr, ok := trace.FromContext(ctx) + if !ok { + return + } + tr.LazyPrintf(format, a...) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go new file mode 100644 index 000000000..4e69a6305 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go @@ -0,0 +1,28 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Backoff Helper Utilities + +Implements common backoff features. +*/ +package backoffutils + +import ( + "math/rand" + "time" +) + +// JitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. +// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s]. +func JitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. +func ExponentBase2(a uint) uint { + return (1 << a) >> 1 +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go new file mode 100644 index 000000000..1ed9bb499 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go @@ -0,0 +1,19 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside +Context handlers. + +While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md), +it is hard to use. + +The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example, +the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context +metadata.
+ + nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") + clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) +*/ +package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go new file mode 100644 index 000000000..9f0456747 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -0,0 +1,126 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "context" + "strings" + + "google.golang.org/grpc/metadata" +) + +// NiceMD is a convenience wrapper defining extra functions on the metadata. +type NiceMD metadata.MD + +// ExtractIncoming extracts the inbound metadata from the server-side context. + +// This function always returns a NiceMD wrapper of the metadata.MD; if the context doesn't have metadata, it returns +// a new empty NiceMD. +func ExtractIncoming(ctx context.Context) NiceMD { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// ExtractOutgoing extracts the outbound metadata from the client-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD; if the context doesn't have metadata, it returns +// a new empty NiceMD. +func ExtractOutgoing(ctx context.Context) NiceMD { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// Clone performs a *deep* copy of the metadata.MD. +// +// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted, +// all keys get copied. +func (m NiceMD) Clone(copiedKeys ...string) NiceMD { + newMd := NiceMD(metadata.Pairs()) + for k, vv := range m { + found := false + if len(copiedKeys) == 0 { + found = true + } else { + for _, allowedKey := range copiedKeys { + if strings.EqualFold(allowedKey, k) { + found = true + break + } + } + } + if !found { + continue + } + newMd[k] = make([]string, len(vv)) + copy(newMd[k], vv) + } + return NiceMD(newMd) +} + +// ToOutgoing sets the given NiceMD as a client-side context for dispatching. +func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { + return metadata.NewOutgoingContext(ctx, metadata.MD(m)) +} + +// ToIncoming sets the given NiceMD as a server-side context for dispatching. +// +// This is mostly useful in ServerInterceptors. +func (m NiceMD) ToIncoming(ctx context.Context) context.Context { + return metadata.NewIncomingContext(ctx, metadata.MD(m)) +} + +// Get retrieves a single value from the metadata. +// +// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, +// an empty string is returned. +// +// The function is binary-key safe. +func (m NiceMD) Get(key string) string { + k, _ := encodeKeyValue(key, "") + vv, ok := m[k] + if !ok { + return "" + } + return vv[0] +} + +// Del removes all values for the given key from the metadata. +// +// It works analogously to http.Header.Del, deleting all values if they exist. +// +// The function is binary-key safe. +func (m NiceMD) Del(key string) NiceMD { + k, _ := encodeKeyValue(key, "") + delete(m, k) + return m +} + +// Set sets the given value in the metadata. +// +// It works analogously to http.Header.Set, overwriting all previous metadata values.
+// +// The function is binary-key safe. +func (m NiceMD) Set(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = []string{v} + return m +} + +// Add appends a value to the metadata. +// +// It works analogously to http.Header.Add, as it appends to any existing values associated with key. +// +// The function is binary-key safe. +func (m NiceMD) Add(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = append(m[k], v) + return m +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go new file mode 100644 index 000000000..8a5387166 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go @@ -0,0 +1,22 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "encoding/base64" + "strings" +) + +const ( + binHdrSuffix = "-bin" +) + +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 7b76b4b89..7e86dc878 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -30,6 +30,19 @@ func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { return intercept } +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + // Emit the message and args at TRACE level to log and sinks func (i *interceptLogger) Trace(msg string, args ...interface{}) { i.Logger.Trace(msg, args...) diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 65129ff74..0786c924b 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -120,7 +120,7 @@ func newLogger(opts *LoggerOptions) *intLogger { // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. -func (l *intLogger) Log(name string, level Level, msg string, args ...interface{}) { +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { if level < Level(atomic.LoadInt32(l.level)) { return } @@ -133,7 +133,7 @@ func (l *intLogger) Log(name string, level Level, msg string, args ...interface{ if l.json { l.logJSON(t, name, level, msg, args...) } else { - l.log(t, name, level, msg, args...) + l.logPlain(t, name, level, msg, args...)
} l.writer.Flush(level) @@ -171,7 +171,7 @@ func trimCallerPath(path string) string { var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`) // Non-JSON logging format function -func (l *intLogger) log(t time.Time, name string, level Level, msg string, args ...interface{}) { +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { l.writer.WriteString(t.Format(l.timeFormat)) l.writer.WriteByte(' ') @@ -431,29 +431,34 @@ func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg strin return vals } +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + // Emit the message and args at DEBUG level func (l *intLogger) Debug(msg string, args ...interface{}) { - l.Log(l.Name(), Debug, msg, args...) + l.log(l.Name(), Debug, msg, args...) } // Emit the message and args at TRACE level func (l *intLogger) Trace(msg string, args ...interface{}) { - l.Log(l.Name(), Trace, msg, args...) + l.log(l.Name(), Trace, msg, args...) } // Emit the message and args at INFO level func (l *intLogger) Info(msg string, args ...interface{}) { - l.Log(l.Name(), Info, msg, args...) + l.log(l.Name(), Info, msg, args...) } // Emit the message and args at WARN level func (l *intLogger) Warn(msg string, args ...interface{}) { - l.Log(l.Name(), Warn, msg, args...) + l.log(l.Name(), Warn, msg, args...) } // Emit the message and args at ERROR level func (l *intLogger) Error(msg string, args ...interface{}) { - l.Log(l.Name(), Error, msg, args...) + l.log(l.Name(), Error, msg, args...) } // Indicate that the logger would emit TRACE level logs @@ -593,7 +598,7 @@ func (l *intLogger) checkWriterIsFile() *os.File { // Accept implements the SinkAdapter interface func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { - i.Log(name, level, msg, args...) + i.log(name, level, msg, args...) 
} // ImpliedArgs returns the loggers implied args diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 48d608714..95b03184f 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -95,6 +95,9 @@ type Logger interface { // Args are alternating key, val pairs // keys must be strings // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + // Emit a message and key/value pairs at the TRACE level Trace(msg string, args ...interface{}) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 4abdd5583..bc14f7708 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -15,6 +15,8 @@ func NewNullLogger() Logger { type nullLogger struct{} +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + func (l *nullLogger) Trace(msg string, args ...interface{}) {} func (l *nullLogger) Debug(msg string, args ...interface{}) {} diff --git a/vendor/vendor.json b/vendor/vendor.json index c45a12138..079779ddb 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -87,6 +87,7 @@ {"path":"github.com/circonus-labs/circonus-gometrics/api/config","checksumSHA1":"bQhz/fcyZPmuHSH2qwC4ZtATy5c=","revision":"d6e3aea90ab9f90fe8456e13fc520f43d102da4d","revisionTime":"2019-01-28T15:50:09Z","version":"=v2","versionExact":"v2"}, {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"Ij8yB33E0Kk+GfTkNRoF1mG26dc=","revision":"d6e3aea90ab9f90fe8456e13fc520f43d102da4d","revisionTime":"2019-01-28T15:50:09Z","version":"=v2","versionExact":"v2"}, {"path":"github.com/circonus-labs/circonusllhist","checksumSHA1":"VbfeVqeOM+dTNxCmpvmYS0LwQn0=","revision":"7d649b46cdc2cd2ed102d350688a75a4fd7778c6","revisionTime":"2016-11-21T13:51:53Z"}, + {"path":"github.com/container-storage-interface/spec/lib/go/csi","checksumSHA1":"UG2eSIhT6aFn6zWuz48IhlO+eEE=","revision":"a33ece0a8a9f9449688bad8c3ddb103ecf58749b","revisionTime":"2019-10-21T21:08:49Z","tree":true}, {"path":"github.com/containerd/console","checksumSHA1":"Lc9okmPYuvnmj2yWbW/ioFh2LJE=","revision":"8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6","revisionTime":"2019-12-19T16:52:38Z"}, {"path":"github.com/containerd/containerd/errdefs","checksumSHA1":"ru5eKWdLzXfpNRL+Mi1bxbmY8DU=","revision":"14fbcd886f6e971d86f6e3bed43298491d89f393","revisionTime":"2020-03-14T00:01:32Z"}, {"path":"github.com/containerd/continuity/pathdriver","checksumSHA1":"GqIrOttKaO7k6HIaHQLPr3cY7rY=","origin":"github.com/docker/docker/vendor/github.com/containerd/continuity/pathdriver","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, @@ -196,6 +197,9 @@ {"path":"github.com/gorilla/context","checksumSHA1":"g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=","revision":"08b5f424b9271eedf6f9f0ce86cb9396ed337a42","revisionTime":"2016-08-17T18:46:32Z"}, {"path":"github.com/gorilla/mux","checksumSHA1":"STQSdSj2FcpCf0NLfdsKhNutQT0=","revision":"e48e440e4c92e3251d812f8ce7858944dfa3331c","revisionTime":"2018-08-07T07:52:56Z"}, {"path":"github.com/gorilla/websocket","checksumSHA1":"gr0edNJuVv4+olNNZl5ZmwLgscA=","revision":"0ec3d1bd7fe50c503d6df98ee649d81f4857c564","revisionTime":"2019-03-06T00:42:57Z"}, + 
{"path":"github.com/grpc-ecosystem/go-grpc-middleware/retry","checksumSHA1":"Wmzc+OYGzhkkXvwphrh/1C7TGmI=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, + {"path":"github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils","checksumSHA1":"BnZAJHYhcmPNlto3WkzkWnRPlXs=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, + {"path":"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils","checksumSHA1":"xvSmjSRfCFmWbEs50bSDXyzRWCo=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, {"path":"github.com/hashicorp/consul-template","checksumSHA1":"R4eLvAFtqPg22sjAUysBhFfdUPs=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, {"path":"github.com/hashicorp/consul-template/child","checksumSHA1":"yQfiSUOpV5BvGeztDd4fcA7qsbw=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, {"path":"github.com/hashicorp/consul-template/config","checksumSHA1":"ldDPZxD2PEPY4F9MFSOG4D8FWo8=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, @@ -239,7 +243,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"p0y3e3+Oj9GJXM/OW3ISDXap5+w=","revision":"e8a977f5d6b14a15e6672edf8b1d6cd545388c7a","revisionTime":"2019-12-18T17:30:18Z","version":"v0.10.1","versionExact":"v0.10.1"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"tNgHh706sto5/99XYD5jIuBDqa8=","revision":"0e86804c9e4bede0738cbbc370e705ef82580e7e","revisionTime":"2020-01-11T00:06:39Z","version":"v0.11.0","versionExact":"v0.11.0"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"},