From 438657d8221de2ff3ae7b51d75e2fabffb95b686 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 22 Oct 2019 17:18:14 +0200 Subject: [PATCH 001/126] vendor: Init csi protobufs --- .../container-storage-interface/spec/LICENSE | 201 + .../spec/lib/go/csi/csi.pb.go | 5728 +++++++++++++++++ vendor/vendor.json | 1 + 3 files changed, 5930 insertions(+) create mode 100644 vendor/github.com/container-storage-interface/spec/LICENSE create mode 100644 vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go diff --git a/vendor/github.com/container-storage-interface/spec/LICENSE b/vendor/github.com/container-storage-interface/spec/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go new file mode 100644 index 000000000..e2e02d808 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go @@ -0,0 +1,5728 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/container-storage-interface/spec/csi.proto + +package csi + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins MAY wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 + // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for + // this plugin MAY NOT be equally accessible by all nodes in the + // cluster. 
The CO MUST use the topology information returned by + // CreateVolumeRequest along with the topology information + // returned by NodeGetInfo to ensure that a given volume is + // accessible from a given node when scheduling workloads. + PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS PluginCapability_Service_Type = 2 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", + 2: "VOLUME_ACCESSIBILITY_CONSTRAINTS", +} + +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, + "VOLUME_ACCESSIBILITY_CONSTRAINTS": 2, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} + +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 0, 0} +} + +type PluginCapability_VolumeExpansion_Type int32 + +const ( + PluginCapability_VolumeExpansion_UNKNOWN PluginCapability_VolumeExpansion_Type = 0 + // ONLINE indicates that volumes may be expanded when published to + // a node. When a Plugin implements this capability it MUST + // implement either the EXPAND_VOLUME controller capability or the + // EXPAND_VOLUME node capability or both. When a plugin supports + // ONLINE volume expansion and also has the EXPAND_VOLUME + // controller capability then the plugin MUST support expansion of + // volumes currently published and available on a node. When a + // plugin supports ONLINE volume expansion and also has the + // EXPAND_VOLUME node capability then the plugin MAY support + // expansion of node-published volume via NodeExpandVolume. + // + // Example 1: Given a shared filesystem volume (e.g. GlusterFs), + // the Plugin may set the ONLINE volume expansion capability and + // implement ControllerExpandVolume but not NodeExpandVolume. + // + // Example 2: Given a block storage volume type (e.g. EBS), the + // Plugin may set the ONLINE volume expansion capability and + // implement both ControllerExpandVolume and NodeExpandVolume. + // + // Example 3: Given a Plugin that supports volume expansion only + // upon a node, the Plugin may set the ONLINE volume + // expansion capability and implement NodeExpandVolume but not + // ControllerExpandVolume. + PluginCapability_VolumeExpansion_ONLINE PluginCapability_VolumeExpansion_Type = 1 + // OFFLINE indicates that volumes currently published and + // available on a node SHALL NOT be expanded via + // ControllerExpandVolume. When a plugin supports OFFLINE volume + // expansion it MUST implement either the EXPAND_VOLUME controller + // capability or both the EXPAND_VOLUME controller capability and + // the EXPAND_VOLUME node capability. + // + // Example 1: Given a block storage volume type (e.g. Azure Disk) + // that does not support expansion of "node-attached" (i.e. + // controller-published) volumes, the Plugin may indicate + // OFFLINE volume expansion support and implement both + // ControllerExpandVolume and NodeExpandVolume. 
+ PluginCapability_VolumeExpansion_OFFLINE PluginCapability_VolumeExpansion_Type = 2 +) + +var PluginCapability_VolumeExpansion_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ONLINE", + 2: "OFFLINE", +} + +var PluginCapability_VolumeExpansion_Type_value = map[string]int32{ + "UNKNOWN": 0, + "ONLINE": 1, + "OFFLINE": 2, +} + +func (x PluginCapability_VolumeExpansion_Type) String() string { + return proto.EnumName(PluginCapability_VolumeExpansion_Type_name, int32(x)) +} + +func (PluginCapability_VolumeExpansion_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 1, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} + +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} + +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 + // Currently the only way to consume a snapshot is to create + // a volume from it. Therefore plugins supporting + // CREATE_DELETE_SNAPSHOT MUST support creating volume from + // snapshot. + ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT ControllerServiceCapability_RPC_Type = 5 + ControllerServiceCapability_RPC_LIST_SNAPSHOTS ControllerServiceCapability_RPC_Type = 6 + // Plugins supporting volume cloning at the storage level MAY + // report this capability. The source volume MUST be managed by + // the same plugin. Not all volume sources and parameters + // combinations MAY work. 
+ ControllerServiceCapability_RPC_CLONE_VOLUME ControllerServiceCapability_RPC_Type = 7 + // Indicates the SP supports ControllerPublishVolume.readonly + // field. + ControllerServiceCapability_RPC_PUBLISH_READONLY ControllerServiceCapability_RPC_Type = 8 + // See VolumeExpansion for details. + ControllerServiceCapability_RPC_EXPAND_VOLUME ControllerServiceCapability_RPC_Type = 9 + // Indicates the SP supports the + // ListVolumesResponse.entry.published_nodes field + ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES ControllerServiceCapability_RPC_Type = 10 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", + 5: "CREATE_DELETE_SNAPSHOT", + 6: "LIST_SNAPSHOTS", + 7: "CLONE_VOLUME", + 8: "PUBLISH_READONLY", + 9: "EXPAND_VOLUME", + 10: "LIST_VOLUMES_PUBLISHED_NODES", +} + +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, + "CREATE_DELETE_SNAPSHOT": 5, + "LIST_SNAPSHOTS": 6, + "CLONE_VOLUME": 7, + "PUBLISH_READONLY": 8, + "EXPAND_VOLUME": 9, + "LIST_VOLUMES_PUBLISHED_NODES": 10, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} + +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{29, 0, 0} +} + +type VolumeUsage_Unit int32 + +const ( + VolumeUsage_UNKNOWN VolumeUsage_Unit = 0 + VolumeUsage_BYTES VolumeUsage_Unit = 1 + VolumeUsage_INODES VolumeUsage_Unit = 2 +) + +var VolumeUsage_Unit_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BYTES", + 2: "INODES", +} + +var VolumeUsage_Unit_value = map[string]int32{ + "UNKNOWN": 0, + "BYTES": 1, + "INODES": 2, +} + +func (x VolumeUsage_Unit) String() string { + return proto.EnumName(VolumeUsage_Unit_name, int32(x)) +} + +func (VolumeUsage_Unit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{49, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 + // If Plugin implements GET_VOLUME_STATS capability + // then it MUST implement NodeGetVolumeStats RPC + // call for fetching volume statistics. + NodeServiceCapability_RPC_GET_VOLUME_STATS NodeServiceCapability_RPC_Type = 2 + // See VolumeExpansion for details. 
+ NodeServiceCapability_RPC_EXPAND_VOLUME NodeServiceCapability_RPC_Type = 3 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", + 2: "GET_VOLUME_STATS", + 3: "EXPAND_VOLUME", +} + +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, + "GET_VOLUME_STATS": 2, + "EXPAND_VOLUME": 3, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} + +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52, 0, 0} +} + +type GetPluginInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{0} +} + +func (m *GetPluginInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoRequest.Unmarshal(m, b) +} +func (m *GetPluginInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoRequest.Marshal(b, m, deterministic) +} +func (m *GetPluginInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoRequest.Merge(m, src) +} +func (m *GetPluginInfoRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoRequest.Size(m) +} +func (m *GetPluginInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoRequest proto.InternalMessageInfo + +type GetPluginInfoResponse struct { + // The name MUST follow domain name notation format + // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD + // include the plugin's host company name and the plugin name, + // to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion,proto3" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. 
+ Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest,proto3" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{1} +} + +func (m *GetPluginInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginInfoResponse.Unmarshal(m, b) +} +func (m *GetPluginInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginInfoResponse.Marshal(b, m, deterministic) +} +func (m *GetPluginInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginInfoResponse.Merge(m, src) +} +func (m *GetPluginInfoResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginInfoResponse.Size(m) +} +func (m *GetPluginInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginInfoResponse proto.InternalMessageInfo + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{2} +} + +func (m *GetPluginCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *GetPluginCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesRequest.Merge(m, src) +} +func (m *GetPluginCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesRequest.Size(m) +} +func (m *GetPluginCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesRequest proto.InternalMessageInfo + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*PluginCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{3} +} + +func (m *GetPluginCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Unmarshal(m, b) +} +func (m *GetPluginCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *GetPluginCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPluginCapabilitiesResponse.Merge(m, src) +} +func (m *GetPluginCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_GetPluginCapabilitiesResponse.Size(m) +} +func (m *GetPluginCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPluginCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPluginCapabilitiesResponse proto.InternalMessageInfo + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. +type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + // *PluginCapability_VolumeExpansion_ + Type isPluginCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4} +} + +func (m *PluginCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability.Unmarshal(m, b) +} +func (m *PluginCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability.Marshal(b, m, deterministic) +} +func (m *PluginCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability.Merge(m, src) +} +func (m *PluginCapability) XXX_Size() int { + return xxx_messageInfo_PluginCapability.Size(m) +} +func (m *PluginCapability) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability proto.InternalMessageInfo + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,proto3,oneof"` +} + +type PluginCapability_VolumeExpansion_ struct { + VolumeExpansion *PluginCapability_VolumeExpansion `protobuf:"bytes,2,opt,name=volume_expansion,json=volumeExpansion,proto3,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (*PluginCapability_VolumeExpansion_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m 
*PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +func (m *PluginCapability) GetVolumeExpansion() *PluginCapability_VolumeExpansion { + if x, ok := m.GetType().(*PluginCapability_VolumeExpansion_); ok { + return x.VolumeExpansion + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*PluginCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*PluginCapability_Service_)(nil), + (*PluginCapability_VolumeExpansion_)(nil), + } +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_Service_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 0} +} + +func (m *PluginCapability_Service) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_Service.Unmarshal(m, b) +} +func (m *PluginCapability_Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_Service.Marshal(b, m, deterministic) +} +func (m *PluginCapability_Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_Service.Merge(m, src) +} +func (m *PluginCapability_Service) XXX_Size() int { + return xxx_messageInfo_PluginCapability_Service.Size(m) +} +func (m *PluginCapability_Service) XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_Service proto.InternalMessageInfo + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type PluginCapability_VolumeExpansion struct { + Type PluginCapability_VolumeExpansion_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.PluginCapability_VolumeExpansion_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PluginCapability_VolumeExpansion) Reset() { *m = PluginCapability_VolumeExpansion{} } +func (m *PluginCapability_VolumeExpansion) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_VolumeExpansion) ProtoMessage() {} +func (*PluginCapability_VolumeExpansion) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{4, 1} +} + +func (m *PluginCapability_VolumeExpansion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Unmarshal(m, b) +} +func (m *PluginCapability_VolumeExpansion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Marshal(b, m, deterministic) +} +func (m *PluginCapability_VolumeExpansion) XXX_Merge(src proto.Message) { + xxx_messageInfo_PluginCapability_VolumeExpansion.Merge(m, src) +} +func (m *PluginCapability_VolumeExpansion) XXX_Size() int { + return xxx_messageInfo_PluginCapability_VolumeExpansion.Size(m) +} +func (m *PluginCapability_VolumeExpansion) 
XXX_DiscardUnknown() { + xxx_messageInfo_PluginCapability_VolumeExpansion.DiscardUnknown(m) +} + +var xxx_messageInfo_PluginCapability_VolumeExpansion proto.InternalMessageInfo + +func (m *PluginCapability_VolumeExpansion) GetType() PluginCapability_VolumeExpansion_Type { + if m != nil { + return m.Type + } + return PluginCapability_VolumeExpansion_UNKNOWN +} + +type ProbeRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func (*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{5} +} + +func (m *ProbeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeRequest.Unmarshal(m, b) +} +func (m *ProbeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeRequest.Marshal(b, m, deterministic) +} +func (m *ProbeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeRequest.Merge(m, src) +} +func (m *ProbeRequest) XXX_Size() int { + return xxx_messageInfo_ProbeRequest.Size(m) +} +func (m *ProbeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeRequest proto.InternalMessageInfo + +type ProbeResponse struct { + // Readiness allows a plugin to report its initialization status back + // to the CO. Initialization for some plugins MAY be time consuming + // and it is important for a CO to distinguish between the following + // cases: + // + // 1) The plugin is in an unhealthy state and MAY need restarting. In + // this case a gRPC error code SHALL be returned. + // 2) The plugin is still initializing, but is otherwise perfectly + // healthy. In this case a successful response SHALL be returned + // with a readiness value of `false`. Calls to the plugin's + // Controller and/or Node services MAY fail due to an incomplete + // initialization state. + // 3) The plugin has finished initializing and is ready to service + // calls to its Controller and/or Node services. A successful + // response is returned with a readiness value of `true`. + // + // This field is OPTIONAL. If not present, the caller SHALL assume + // that the plugin is in a ready state and is accepting calls to its + // Controller and/or Node services (according to the plugin's reported + // capabilities). 
+ Ready *wrappers.BoolValue `protobuf:"bytes,1,opt,name=ready,proto3" json:"ready,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{6} +} + +func (m *ProbeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbeResponse.Unmarshal(m, b) +} +func (m *ProbeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbeResponse.Marshal(b, m, deterministic) +} +func (m *ProbeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbeResponse.Merge(m, src) +} +func (m *ProbeResponse) XXX_Size() int { + return xxx_messageInfo_ProbeResponse.Size(m) +} +func (m *ProbeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ProbeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbeResponse proto.InternalMessageInfo + +func (m *ProbeResponse) GetReady() *wrappers.BoolValue { + if m != nil { + return m.Ready + } + return nil +} + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. The Plugin SHOULD ensure that multiple + // `CreateVolume` calls for the same name do not result in more + // than one piece of storage provisioned corresponding to that + // name. If a Plugin is unable to enforce idempotency, the CO's + // error recovery logic could result in multiple (unused) volumes + // being provisioned. + // In the case of error, the CO MUST handle the gRPC error codes + // per the recovery behavior defined in the "CreateVolume Errors" + // section below. + // The CO is responsible for cleaning up volumes it provisioned + // that it no longer needs. If the CO is uncertain whether a volume + // was provisioned or not when a `CreateVolume` call fails, the CO + // MAY call `CreateVolume` again, with the same name, to ensure the + // volume exists and to retrieve the volume's `volume_id` (unless + // otherwise prohibited by "CreateVolume Errors"). + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. If + // specified it MUST always be honored, even when creating volumes + // from a source; which MAY force some backends to internally extend + // the volume after creating it. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have. 
SP MUST + // provision a volume that will satisfy ALL of the capabilities + // specified in this list. Otherwise SP MUST return the appropriate + // gRPC error code. + // The Plugin MUST assume that the CO MAY use the provisioned volume + // with ANY of the capabilities specified in this list. + // For example, a CO MAY specify two volume capabilities: one with + // access mode SINGLE_NODE_WRITER and another with access mode + // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the + // provisioned volume can be used in either mode. + // This also enables the CO to do early validation: If ANY of the + // specified volume capabilities are not supported by the SP, the call + // MUST return the appropriate gRPC error code. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the new volume will be pre-populated with data from + // this source. This field is OPTIONAL. + VolumeContentSource *VolumeContentSource `protobuf:"bytes,6,opt,name=volume_content_source,json=volumeContentSource,proto3" json:"volume_content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume MUST be accessible from. + // An SP SHALL advertise the requirements for topological + // accessibility information in documentation. COs SHALL only specify + // topological accessibility information supported by the SP. + // This field is OPTIONAL. + // This field SHALL NOT be specified unless the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // If this field is not specified and the SP has the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY + // choose where the provisioned volume is accessible from. 
+ AccessibilityRequirements *TopologyRequirement `protobuf:"bytes,7,opt,name=accessibility_requirements,json=accessibilityRequirements,proto3" json:"accessibility_requirements,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{7} +} + +func (m *CreateVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeRequest.Unmarshal(m, b) +} +func (m *CreateVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeRequest.Marshal(b, m, deterministic) +} +func (m *CreateVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeRequest.Merge(m, src) +} +func (m *CreateVolumeRequest) XXX_Size() int { + return xxx_messageInfo_CreateVolumeRequest.Size(m) +} +func (m *CreateVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeRequest proto.InternalMessageInfo + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeContentSource() *VolumeContentSource { + if m != nil { + return m.VolumeContentSource + } + return nil +} + +func (m *CreateVolumeRequest) GetAccessibilityRequirements() *TopologyRequirement { + if m != nil { + return m.AccessibilityRequirements + } + return nil +} + +// Specifies what source the volume will be created from. One of the +// type fields MUST be specified. 
+type VolumeContentSource struct { + // Types that are valid to be assigned to Type: + // *VolumeContentSource_Snapshot + // *VolumeContentSource_Volume + Type isVolumeContentSource_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource) Reset() { *m = VolumeContentSource{} } +func (m *VolumeContentSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource) ProtoMessage() {} +func (*VolumeContentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8} +} + +func (m *VolumeContentSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource.Unmarshal(m, b) +} +func (m *VolumeContentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource.Merge(m, src) +} +func (m *VolumeContentSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource.Size(m) +} +func (m *VolumeContentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource proto.InternalMessageInfo + +type isVolumeContentSource_Type interface { + isVolumeContentSource_Type() +} + +type VolumeContentSource_Snapshot struct { + Snapshot *VolumeContentSource_SnapshotSource `protobuf:"bytes,1,opt,name=snapshot,proto3,oneof"` +} + +type VolumeContentSource_Volume struct { + Volume *VolumeContentSource_VolumeSource `protobuf:"bytes,2,opt,name=volume,proto3,oneof"` +} + +func (*VolumeContentSource_Snapshot) isVolumeContentSource_Type() {} + +func (*VolumeContentSource_Volume) isVolumeContentSource_Type() {} + +func (m *VolumeContentSource) GetType() isVolumeContentSource_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *VolumeContentSource) GetSnapshot() *VolumeContentSource_SnapshotSource { + if x, ok := m.GetType().(*VolumeContentSource_Snapshot); ok { + return x.Snapshot + } + return nil +} + +func (m *VolumeContentSource) GetVolume() *VolumeContentSource_VolumeSource { + if x, ok := m.GetType().(*VolumeContentSource_Volume); ok { + return x.Volume + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*VolumeContentSource) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*VolumeContentSource_Snapshot)(nil), + (*VolumeContentSource_Volume)(nil), + } +} + +type VolumeContentSource_SnapshotSource struct { + // Contains identity information for the existing source snapshot. + // This field is REQUIRED. Plugin is REQUIRED to support creating + // volume from snapshot if it supports the capability + // CREATE_DELETE_SNAPSHOT. 
+ SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_SnapshotSource) Reset() { *m = VolumeContentSource_SnapshotSource{} } +func (m *VolumeContentSource_SnapshotSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_SnapshotSource) ProtoMessage() {} +func (*VolumeContentSource_SnapshotSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8, 0} +} + +func (m *VolumeContentSource_SnapshotSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_SnapshotSource.Merge(m, src) +} +func (m *VolumeContentSource_SnapshotSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_SnapshotSource.Size(m) +} +func (m *VolumeContentSource_SnapshotSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_SnapshotSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_SnapshotSource proto.InternalMessageInfo + +func (m *VolumeContentSource_SnapshotSource) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +type VolumeContentSource_VolumeSource struct { + // Contains identity information for the existing source volume. + // This field is REQUIRED. Plugins reporting CLONE_VOLUME + // capability MUST support creating a volume from another volume. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeContentSource_VolumeSource) Reset() { *m = VolumeContentSource_VolumeSource{} } +func (m *VolumeContentSource_VolumeSource) String() string { return proto.CompactTextString(m) } +func (*VolumeContentSource_VolumeSource) ProtoMessage() {} +func (*VolumeContentSource_VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{8, 1} +} + +func (m *VolumeContentSource_VolumeSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Unmarshal(m, b) +} +func (m *VolumeContentSource_VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Marshal(b, m, deterministic) +} +func (m *VolumeContentSource_VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeContentSource_VolumeSource.Merge(m, src) +} +func (m *VolumeContentSource_VolumeSource) XXX_Size() int { + return xxx_messageInfo_VolumeContentSource_VolumeSource.Size(m) +} +func (m *VolumeContentSource_VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeContentSource_VolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeContentSource_VolumeSource proto.InternalMessageInfo + +func (m *VolumeContentSource_VolumeSource) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{9} +} + +func (m *CreateVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateVolumeResponse.Unmarshal(m, b) +} +func (m *CreateVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateVolumeResponse.Marshal(b, m, deterministic) +} +func (m *CreateVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateVolumeResponse.Merge(m, src) +} +func (m *CreateVolumeResponse) XXX_Size() int { + return xxx_messageInfo_CreateVolumeResponse.Size(m) +} +func (m *CreateVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateVolumeResponse proto.InternalMessageInfo + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. 
+ // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. + AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode,proto3" json:"access_mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10} +} + +func (m *VolumeCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability.Unmarshal(m, b) +} +func (m *VolumeCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability.Marshal(b, m, deterministic) +} +func (m *VolumeCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability.Merge(m, src) +} +func (m *VolumeCapability) XXX_Size() int { + return xxx_messageInfo_VolumeCapability.Size(m) +} +func (m *VolumeCapability) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability proto.InternalMessageInfo + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,proto3,oneof"` +} + +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,proto3,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} + +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +// Indicate that the volume will be accessed via the block device API. 
+type VolumeCapability_BlockVolume struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} } +func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_BlockVolume) ProtoMessage() {} +func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 0} +} + +func (m *VolumeCapability_BlockVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_BlockVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_BlockVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_BlockVolume.Marshal(b, m, deterministic) +} +func (m *VolumeCapability_BlockVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_BlockVolume.Merge(m, src) +} +func (m *VolumeCapability_BlockVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_BlockVolume.Size(m) +} +func (m *VolumeCapability_BlockVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_BlockVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_BlockVolume proto.InternalMessageInfo + +// Indicate that the volume will be accessed via the filesystem API. +type VolumeCapability_MountVolume struct { + // The filesystem type. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType,proto3" json:"fs_type,omitempty"` + // The mount options that can be used for the volume. This field is + // OPTIONAL. `mount_flags` MAY contain sensitive information. + // Therefore, the CO and the Plugin MUST NOT leak this information + // to untrusted entities. The total size of this repeated field + // SHALL NOT exceed 4 KiB. 
+ MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags,proto3" json:"mount_flags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} } +func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_MountVolume) ProtoMessage() {} +func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 1} +} + +func (m *VolumeCapability_MountVolume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_MountVolume.Unmarshal(m, b) +} +func (m *VolumeCapability_MountVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_MountVolume.Marshal(b, m, deterministic) +} +func (m *VolumeCapability_MountVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_MountVolume.Merge(m, src) +} +func (m *VolumeCapability_MountVolume) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_MountVolume.Size(m) +} +func (m *VolumeCapability_MountVolume) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_MountVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_MountVolume proto.InternalMessageInfo + +func (m *VolumeCapability_MountVolume) GetFsType() string { + if m != nil { + return m.FsType + } + return "" +} + +func (m *VolumeCapability_MountVolume) GetMountFlags() []string { + if m != nil { + return m.MountFlags + } + return nil +} + +// Specify how a volume can be accessed. +type VolumeCapability_AccessMode struct { + // This field is REQUIRED. + Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=csi.v1.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} } +func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability_AccessMode) ProtoMessage() {} +func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{10, 2} +} + +func (m *VolumeCapability_AccessMode) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeCapability_AccessMode.Unmarshal(m, b) +} +func (m *VolumeCapability_AccessMode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeCapability_AccessMode.Marshal(b, m, deterministic) +} +func (m *VolumeCapability_AccessMode) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeCapability_AccessMode.Merge(m, src) +} +func (m *VolumeCapability_AccessMode) XXX_Size() int { + return xxx_messageInfo_VolumeCapability_AccessMode.Size(m) +} +func (m *VolumeCapability_AccessMode) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeCapability_AccessMode.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeCapability_AccessMode proto.InternalMessageInfo + +func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode { + if m != nil { + return m.Mode + } + return VolumeCapability_AccessMode_UNKNOWN +} + +// The capacity of the storage space in bytes. To specify an exact size, +// `required_bytes` and `limit_bytes` SHALL be set to the same value. At +// least one of the these fields MUST be specified. 
+type CapacityRange struct { + // Volume MUST be at least this big. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes,proto3" json:"required_bytes,omitempty"` + // Volume MUST not be bigger than this. This field is OPTIONAL. + // A value of 0 is equal to an unspecified field value. + // The value of this field MUST NOT be negative. + LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes,proto3" json:"limit_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{11} +} + +func (m *CapacityRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CapacityRange.Unmarshal(m, b) +} +func (m *CapacityRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CapacityRange.Marshal(b, m, deterministic) +} +func (m *CapacityRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_CapacityRange.Merge(m, src) +} +func (m *CapacityRange) XXX_Size() int { + return xxx_messageInfo_CapacityRange.Size(m) +} +func (m *CapacityRange) XXX_DiscardUnknown() { + xxx_messageInfo_CapacityRange.DiscardUnknown(m) +} + +var xxx_messageInfo_CapacityRange proto.InternalMessageInfo + +func (m *CapacityRange) GetRequiredBytes() int64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() int64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// Information about a specific volume. +type Volume struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + // The identifier for this volume, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific volume vs all other volumes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this volume. + // The SP is NOT responsible for global uniqueness of volume_id across + // multiple SPs. + VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Opaque static properties of the volume. SP MAY use this field to + // ensure subsequent volume validation and publishing calls have + // contextual information. + // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // A volume uniquely identified by `volume_id` SHALL always report the + // same volume_context. 
+ // This field is OPTIONAL and when present MUST be passed to volume + // validation and publishing calls. + VolumeContext map[string]string `protobuf:"bytes,3,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, indicates that the volume is not empty and is + // pre-populated with data from the specified source. + // This field is OPTIONAL. + ContentSource *VolumeContentSource `protobuf:"bytes,4,opt,name=content_source,json=contentSource,proto3" json:"content_source,omitempty"` + // Specifies where (regions, zones, racks, etc.) the provisioned + // volume is accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // An SP MAY specify multiple topologies to indicate the volume is + // accessible from multiple locations. + // COs MAY use this information along with the topology information + // returned by NodeGetInfo to ensure that a given volume is accessible + // from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the volume is equally accessible from all nodes in the cluster and + // MAY schedule workloads referencing the volume on any available + // node. + // + // Example 1: + // accessible_topology = {"region": "R1", "zone": "Z2"} + // Indicates a volume accessible only from the "region" "R1" and the + // "zone" "Z2". + // + // Example 2: + // accessible_topology = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"} + // Indicates a volume accessible from both "zone" "Z2" and "zone" "Z3" + // in the "region" "R1". + AccessibleTopology []*Topology `protobuf:"bytes,5,rep,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{12} +} + +func (m *Volume) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Volume.Unmarshal(m, b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Volume.Marshal(b, m, deterministic) +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return xxx_messageInfo_Volume.Size(m) +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *Volume) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *Volume) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *Volume) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *Volume) GetContentSource() *VolumeContentSource { + if m != nil { + return m.ContentSource + } + return nil +} + +func (m *Volume) GetAccessibleTopology() []*Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type TopologyRequirement struct { + // Specifies the list of topologies the provisioned volume MUST be + // accessible from. + // This field is OPTIONAL. 
If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// If requisite is specified, the provisioned volume MUST be
+	// accessible from at least one of the requisite topologies.
+	//
+	// Given
+	//   x = number of topologies provisioned volume is accessible from
+	//   n = number of requisite topologies
+	// The CO MUST ensure n >= 1. The SP MUST ensure x >= 1
+	// If x==n, then the SP MUST make the provisioned volume available to
+	// all topologies from the list of requisite topologies. If it is
+	// unable to do so, the SP MUST fail the CreateVolume call.
+	// For example, if a volume should be accessible from a single zone,
+	// and requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2".
+	// Similarly, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and both "zone" "Z2" and "zone" "Z3".
+	//
+	// If x>n, then the SP MUST make the provisioned volume available from
+	// all topologies from the list of requisite topologies and MAY choose
+	// the remaining x-n unique topologies from the list of all possible
+	// topologies. If it is unable to do so, the SP MUST fail the
+	// CreateVolume call.
+	// For example, if a volume should be accessible from two zones, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"}
+	// then the provisioned volume MUST be accessible from the "region"
+	// "R1" and the "zone" "Z2" and the SP may select the second zone
+	// independently, e.g. "R1/Z4".
+	Requisite []*Topology `protobuf:"bytes,1,rep,name=requisite,proto3" json:"requisite,omitempty"`
+	// Specifies the list of topologies the CO would prefer the volume to
+	// be provisioned in.
+	//
+	// This field is OPTIONAL. If TopologyRequirement is specified either
+	// requisite or preferred or both MUST be specified.
+	//
+	// An SP MUST attempt to make the provisioned volume available using
+	// the preferred topologies in order from first to last.
+	//
+	// If requisite is specified, all topologies in preferred list MUST
+	// also be present in the list of requisite topologies.
+	//
+	// If the SP is unable to make the provisioned volume available
+	// from any of the preferred topologies, the SP MAY choose a topology
+	// from the list of requisite topologies.
+	// If the list of requisite topologies is not specified, then the SP
+	// MAY choose from the list of all possible topologies.
+	// If the list of requisite topologies is specified and the SP is
+	// unable to make the provisioned volume available from any of the
+	// requisite topologies it MUST fail the CreateVolume call.
+	//
+	// Example 1:
+	// Given a volume should be accessible from a single zone, and
+	// requisite =
+	//   {"region": "R1", "zone": "Z2"},
+	//   {"region": "R1", "zone": "Z3"}
+	// preferred =
+	//   {"region": "R1", "zone": "Z3"}
+	// then the SP SHOULD first attempt to make the provisioned volume
+	// available from "zone" "Z3" in the "region" "R1" and fall back to
+	// "zone" "Z2" in the "region" "R1" if that is not possible.
+ // + // Example 2: + // Given a volume should be accessible from a single zone, and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z2"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from "zone" "Z4" in the "region" "R1" and fall back to + // "zone" "Z2" in the "region" "R1" if that is not possible. If that + // is not possible, the SP may choose between either the "zone" + // "Z3" or "Z5" in the "region" "R1". + // + // Example 3: + // Given a volume should be accessible from TWO zones (because an + // opaque parameter in CreateVolumeRequest, for example, specifies + // the volume is accessible from two zones, aka synchronously + // replicated), and + // requisite = + // {"region": "R1", "zone": "Z2"}, + // {"region": "R1", "zone": "Z3"}, + // {"region": "R1", "zone": "Z4"}, + // {"region": "R1", "zone": "Z5"} + // preferred = + // {"region": "R1", "zone": "Z5"}, + // {"region": "R1", "zone": "Z3"} + // then the the SP SHOULD first attempt to make the provisioned volume + // accessible from the combination of the two "zones" "Z5" and "Z3" in + // the "region" "R1". If that's not possible, it should fall back to + // a combination of "Z5" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of "Z3" and other possibilities from the list of + // requisite. If that's not possible, it should fall back to a + // combination of other possibilities from the list of requisite. + Preferred []*Topology `protobuf:"bytes,2,rep,name=preferred,proto3" json:"preferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TopologyRequirement) Reset() { *m = TopologyRequirement{} } +func (m *TopologyRequirement) String() string { return proto.CompactTextString(m) } +func (*TopologyRequirement) ProtoMessage() {} +func (*TopologyRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{13} +} + +func (m *TopologyRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TopologyRequirement.Unmarshal(m, b) +} +func (m *TopologyRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TopologyRequirement.Marshal(b, m, deterministic) +} +func (m *TopologyRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologyRequirement.Merge(m, src) +} +func (m *TopologyRequirement) XXX_Size() int { + return xxx_messageInfo_TopologyRequirement.Size(m) +} +func (m *TopologyRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologyRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologyRequirement proto.InternalMessageInfo + +func (m *TopologyRequirement) GetRequisite() []*Topology { + if m != nil { + return m.Requisite + } + return nil +} + +func (m *TopologyRequirement) GetPreferred() []*Topology { + if m != nil { + return m.Preferred + } + return nil +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// A topological segment is a specific instance of a topological domain, +// like "zone3", "rack3", etc. 
+// For example {"com.company/zone": "Z1", "com.company/rack": "R3"} +// Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// The key name segment is REQUIRED. The prefix is OPTIONAL. +// The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// Each value (topological segment) MUST contain 1 or more strings. +// Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Topology) Reset() { *m = Topology{} } +func (m *Topology) String() string { return proto.CompactTextString(m) } +func (*Topology) ProtoMessage() {} +func (*Topology) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{14} +} + +func (m *Topology) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Topology.Unmarshal(m, b) +} +func (m *Topology) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Topology.Marshal(b, m, deterministic) +} +func (m *Topology) XXX_Merge(src proto.Message) { + xxx_messageInfo_Topology.Merge(m, src) +} +func (m *Topology) XXX_Size() int { + return xxx_messageInfo_Topology.Size(m) +} +func (m *Topology) XXX_DiscardUnknown() { + xxx_messageInfo_Topology.DiscardUnknown(m) +} + +var xxx_messageInfo_Topology proto.InternalMessageInfo + +func (m *Topology) GetSegments() map[string]string { + if m != nil { + return m.Segments + } + return nil +} + +type DeleteVolumeRequest struct { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Secrets required by plugin to complete volume deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{15} +} + +func (m *DeleteVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeRequest.Unmarshal(m, b) +} +func (m *DeleteVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeRequest.Marshal(b, m, deterministic) +} +func (m *DeleteVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeRequest.Merge(m, src) +} +func (m *DeleteVolumeRequest) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeRequest.Size(m) +} +func (m *DeleteVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeRequest proto.InternalMessageInfo + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{16} +} + +func (m *DeleteVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteVolumeResponse.Unmarshal(m, b) +} +func (m *DeleteVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteVolumeResponse.Marshal(b, m, deterministic) +} +func (m *DeleteVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteVolumeResponse.Merge(m, src) +} +func (m *DeleteVolumeResponse) XXX_Size() int { + return xxx_messageInfo_DeleteVolumeResponse.Size(m) +} +func (m *DeleteVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteVolumeResponse proto.InternalMessageInfo + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetInfo`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. 
+ VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Secrets required by plugin to complete controller publish volume + // request. This field is OPTIONAL. Refer to the + // `Secrets Requirements` section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{17} +} + +func (m *ControllerPublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *ControllerPublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeRequest.Merge(m, src) +} +func (m *ControllerPublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeRequest.Size(m) +} +func (m *ControllerPublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerPublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. 
+ // The contents of this field SHALL be opaque to a CO. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the CO to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + PublishContext map[string]string `protobuf:"bytes,1,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{18} +} + +func (m *ControllerPublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerPublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerPublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerPublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *ControllerPublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerPublishVolumeResponse.Merge(m, src) +} +func (m *ControllerPublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerPublishVolumeResponse.Size(m) +} +func (m *ControllerPublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerPublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerPublishVolumeResponse proto.InternalMessageInfo + +func (m *ControllerPublishVolumeResponse) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +type ControllerUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetInfo` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Secrets required by plugin to complete controller unpublish volume + // request. This SHOULD be the same secrets passed to the + // ControllerPublishVolume call for the specified volume. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} } +func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeRequest) ProtoMessage() {} +func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{19} +} + +func (m *ControllerUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeRequest.Merge(m, src) +} +func (m *ControllerUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeRequest.Size(m) +} +func (m *ControllerUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerUnpublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type ControllerUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} } +func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerUnpublishVolumeResponse) ProtoMessage() {} +func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{20} +} + +func (m *ControllerUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerUnpublishVolumeResponse.Merge(m, src) +} +func (m *ControllerUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerUnpublishVolumeResponse.Size(m) +} +func (m *ControllerUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerUnpublishVolumeResponse proto.InternalMessageInfo + +type ValidateVolumeCapabilitiesRequest struct { + // The ID of the volume to check. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,2,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The capabilities that the CO wants to check for the volume. This + // call SHALL return "confirmed" only if all the volume capabilities + // specified below are supported. This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // See CreateVolumeRequest.parameters. + // This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Secrets required by plugin to complete volume validation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{21} +} + +func (m *ValidateVolumeCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesRequest.Size(m) +} +func (m *ValidateVolumeCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesRequest proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + 
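+// NOTE: the helper below is an illustrative sketch added for review
+// purposes only; it is NOT part of the upstream generated csi.pb.go.
+// It shows how a caller might assemble a
+// ValidateVolumeCapabilitiesRequest for a mount volume using the
+// message types defined above. The function name, its parameters, and
+// the choice of a single mount capability are assumptions for the
+// example, not anything mandated by the CSI spec.
+func exampleValidateVolumeCapabilitiesRequest(volumeID, fsType string, mode VolumeCapability_AccessMode_Mode) *ValidateVolumeCapabilitiesRequest {
+	return &ValidateVolumeCapabilitiesRequest{
+		// volume_id is REQUIRED and identifies the volume to check.
+		VolumeId: volumeID,
+		// The plugin confirms the request only if it supports ALL of
+		// the capabilities listed here; this sketch checks a single
+		// mount capability.
+		VolumeCapabilities: []*VolumeCapability{
+			{
+				// Access the volume through a mounted filesystem
+				// rather than the raw block device API.
+				AccessType: &VolumeCapability_Mount{
+					Mount: &VolumeCapability_MountVolume{
+						FsType: fsType,
+					},
+				},
+				// access_mode is a REQUIRED field on every capability.
+				AccessMode: &VolumeCapability_AccessMode{Mode: mode},
+			},
+		},
+	}
+}
+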
+type ValidateVolumeCapabilitiesResponse struct { + // Confirmed indicates to the CO the set of capabilities that the + // plugin has validated. This field SHALL only be set to a non-empty + // value for successful validation responses. + // For successful validation responses, the CO SHALL compare the + // fields of this message to the originally requested capabilities in + // order to guard against an older plugin reporting "valid" for newer + // capability fields that it does not yet understand. + // This field is OPTIONAL. + Confirmed *ValidateVolumeCapabilitiesResponse_Confirmed `protobuf:"bytes,1,opt,name=confirmed,proto3" json:"confirmed,omitempty"` + // Message to the CO if `confirmed` above is empty. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{22} +} + +func (m *ValidateVolumeCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse) GetConfirmed() *ValidateVolumeCapabilitiesResponse_Confirmed { + if m != nil { + return m.Confirmed + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ValidateVolumeCapabilitiesResponse_Confirmed struct { + // Volume context validated by the plugin. + // This field is OPTIONAL. + VolumeContext map[string]string `protobuf:"bytes,1,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capabilities supported by the plugin. + // This field is REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // The volume creation parameters validated by the plugin. + // This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) Reset() { + *m = ValidateVolumeCapabilitiesResponse_Confirmed{} +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) String() string { + return proto.CompactTextString(m) +} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse_Confirmed) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{22, 0} +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Unmarshal(m, b) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Marshal(b, m, deterministic) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Merge(m, src) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_Size() int { + return xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.Size(m) +} +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateVolumeCapabilitiesResponse_Confirmed proto.InternalMessageInfo + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesResponse_Confirmed) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{23} +} + +func (m *ListVolumesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesRequest.Unmarshal(m, b) +} +func (m *ListVolumesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesRequest.Marshal(b, m, deterministic) +} +func (m *ListVolumesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesRequest.Merge(m, src) +} +func (m *ListVolumesRequest) XXX_Size() int { + return xxx_messageInfo_ListVolumesRequest.Size(m) +} +func (m *ListVolumesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesRequest proto.InternalMessageInfo + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
+ NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{24} +} + +func (m *ListVolumesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse.Unmarshal(m, b) +} +func (m *ListVolumesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse.Marshal(b, m, deterministic) +} +func (m *ListVolumesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse.Merge(m, src) +} +func (m *ListVolumesResponse) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse.Size(m) +} +func (m *ListVolumesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse proto.InternalMessageInfo + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_VolumeStatus struct { + // A list of all `node_id` of nodes that the volume in this entry + // is controller published on. + // This field is OPTIONAL. If it is not specified and the SP has + // the LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO + // MAY assume the volume is not controller published to any nodes. + // If the field is not specified and the SP does not have the + // LIST_VOLUMES_PUBLISHED_NODES controller capability, the CO MUST + // not interpret this field. + // published_node_ids MAY include nodes not published to or + // reported by the SP. The CO MUST be resilient to that. 
+ PublishedNodeIds []string `protobuf:"bytes,1,rep,name=published_node_ids,json=publishedNodeIds,proto3" json:"published_node_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse_VolumeStatus) Reset() { *m = ListVolumesResponse_VolumeStatus{} } +func (m *ListVolumesResponse_VolumeStatus) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_VolumeStatus) ProtoMessage() {} +func (*ListVolumesResponse_VolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{24, 0} +} + +func (m *ListVolumesResponse_VolumeStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Unmarshal(m, b) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Marshal(b, m, deterministic) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_VolumeStatus.Merge(m, src) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_VolumeStatus.Size(m) +} +func (m *ListVolumesResponse_VolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_VolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_VolumeStatus proto.InternalMessageInfo + +func (m *ListVolumesResponse_VolumeStatus) GetPublishedNodeIds() []string { + if m != nil { + return m.PublishedNodeIds + } + return nil +} + +type ListVolumesResponse_Entry struct { + // This field is REQUIRED + Volume *Volume `protobuf:"bytes,1,opt,name=volume,proto3" json:"volume,omitempty"` + // This field is OPTIONAL. This field MUST be specified if the + // LIST_VOLUMES_PUBLISHED_NODES controller capability is + // supported. 
+ Status *ListVolumesResponse_VolumeStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{24, 1} +} + +func (m *ListVolumesResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListVolumesResponse_Entry.Unmarshal(m, b) +} +func (m *ListVolumesResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListVolumesResponse_Entry.Marshal(b, m, deterministic) +} +func (m *ListVolumesResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListVolumesResponse_Entry.Merge(m, src) +} +func (m *ListVolumesResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListVolumesResponse_Entry.Size(m) +} +func (m *ListVolumesResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListVolumesResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListVolumesResponse_Entry proto.InternalMessageInfo + +func (m *ListVolumesResponse_Entry) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +func (m *ListVolumesResponse_Entry) GetStatus() *ListVolumesResponse_VolumeStatus { + if m != nil { + return m.Status + } + return nil +} + +type GetCapacityRequest struct { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities,proto3" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. + Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that in the specified + // `accessible_topology`. This is the same as the + // `accessible_topology` the CO returns in a `CreateVolumeResponse`. + // This field is OPTIONAL. This field SHALL NOT be set unless the + // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability. 
+ AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{25} +} + +func (m *GetCapacityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityRequest.Unmarshal(m, b) +} +func (m *GetCapacityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityRequest.Marshal(b, m, deterministic) +} +func (m *GetCapacityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityRequest.Merge(m, src) +} +func (m *GetCapacityRequest) XXX_Size() int { + return xxx_messageInfo_GetCapacityRequest.Size(m) +} +func (m *GetCapacityRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityRequest proto.InternalMessageInfo + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *GetCapacityRequest) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity, in bytes, of the storage that can be used + // to provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. 
+ AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity,proto3" json:"available_capacity,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{26} +} + +func (m *GetCapacityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetCapacityResponse.Unmarshal(m, b) +} +func (m *GetCapacityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetCapacityResponse.Marshal(b, m, deterministic) +} +func (m *GetCapacityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetCapacityResponse.Merge(m, src) +} +func (m *GetCapacityResponse) XXX_Size() int { + return xxx_messageInfo_GetCapacityResponse.Size(m) +} +func (m *GetCapacityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetCapacityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetCapacityResponse proto.InternalMessageInfo + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{27} +} + +func (m *ControllerGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesRequest.Merge(m, src) +} +func (m *ControllerGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesRequest.Size(m) +} +func (m *ControllerGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesRequest proto.InternalMessageInfo + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. 
+ Capabilities []*ControllerServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{28} +} + +func (m *ControllerGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerGetCapabilitiesResponse.Merge(m, src) +} +func (m *ControllerGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_ControllerGetCapabilitiesResponse.Size(m) +} +func (m *ControllerGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. +type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{29} +} + +func (m *ControllerServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability.Unmarshal(m, b) +} +func (m *ControllerServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability.Marshal(b, m, deterministic) +} +func (m *ControllerServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability.Merge(m, src) +} +func (m *ControllerServiceCapability) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability.Size(m) +} +func (m *ControllerServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability proto.InternalMessageInfo + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() 
isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{29, 0} +} + +func (m *ControllerServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerServiceCapability_RPC.Unmarshal(m, b) +} +func (m *ControllerServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (m *ControllerServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerServiceCapability_RPC.Merge(m, src) +} +func (m *ControllerServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_ControllerServiceCapability_RPC.Size(m) +} +func (m *ControllerServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerServiceCapability_RPC proto.InternalMessageInfo + +func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return ControllerServiceCapability_RPC_UNKNOWN +} + +type CreateSnapshotRequest struct { + // The ID of the source volume to be snapshotted. + // This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,1,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // The suggested name for the snapshot. This field is REQUIRED for + // idempotency. + // Any Unicode string that conforms to the length limit is allowed + // except those containing the following banned characters: + // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F. + // (These are control characters other than commonly used whitespace.) + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Secrets required by plugin to complete snapshot creation request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. 
+ // Use cases for opaque parameters: + // - Specify a policy to automatically clean up the snapshot. + // - Specify an expiration date for the snapshot. + // - Specify whether the snapshot is readonly or read/write. + // - Specify if the snapshot should be replicated to some place. + // - Specify primary or secondary for replication systems that + // support snapshotting only on primary. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotRequest) Reset() { *m = CreateSnapshotRequest{} } +func (m *CreateSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotRequest) ProtoMessage() {} +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{30} +} + +func (m *CreateSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotRequest.Unmarshal(m, b) +} +func (m *CreateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotRequest.Marshal(b, m, deterministic) +} +func (m *CreateSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotRequest.Merge(m, src) +} +func (m *CreateSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotRequest.Size(m) +} +func (m *CreateSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotRequest proto.InternalMessageInfo + +func (m *CreateSnapshotRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *CreateSnapshotRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *CreateSnapshotRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type CreateSnapshotResponse struct { + // Contains all attributes of the newly created snapshot that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the snapshot. This field is REQUIRED. 
+ Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateSnapshotResponse) Reset() { *m = CreateSnapshotResponse{} } +func (m *CreateSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSnapshotResponse) ProtoMessage() {} +func (*CreateSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{31} +} + +func (m *CreateSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateSnapshotResponse.Unmarshal(m, b) +} +func (m *CreateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateSnapshotResponse.Marshal(b, m, deterministic) +} +func (m *CreateSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateSnapshotResponse.Merge(m, src) +} +func (m *CreateSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_CreateSnapshotResponse.Size(m) +} +func (m *CreateSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateSnapshotResponse proto.InternalMessageInfo + +func (m *CreateSnapshotResponse) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// Information about a specific snapshot. +type Snapshot struct { + // This is the complete size of the snapshot in bytes. The purpose of + // this field is to give CO guidance on how much space is needed to + // create a volume from this snapshot. The size of the volume MUST NOT + // be less than the size of the source snapshot. This field is + // OPTIONAL. If this field is not set, it indicates that this size is + // unknown. The value of this field MUST NOT be negative and a size of + // zero means it is unspecified. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // The identifier for this snapshot, generated by the plugin. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific snapshot vs all other snapshots supported by this + // plugin. + // This field SHALL be used by the CO in subsequent calls to refer to + // this snapshot. + // The SP is NOT responsible for global uniqueness of snapshot_id + // across multiple SPs. + SnapshotId string `protobuf:"bytes,2,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Identity information for the source volume. Note that creating a + // snapshot from a snapshot is not supported here so the source has to + // be a volume. This field is REQUIRED. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // Timestamp when the point-in-time snapshot is taken on the storage + // system. This field is REQUIRED. + CreationTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // Indicates if a snapshot is ready to use as a + // `volume_content_source` in a `CreateVolumeRequest`. The default + // value is false. This field is REQUIRED. 
+ ReadyToUse bool `protobuf:"varint,5,opt,name=ready_to_use,json=readyToUse,proto3" json:"ready_to_use,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{32} +} + +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetSizeBytes() int64 { + if m != nil { + return m.SizeBytes + } + return 0 +} + +func (m *Snapshot) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *Snapshot) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *Snapshot) GetCreationTime() *timestamp.Timestamp { + if m != nil { + return m.CreationTime + } + return nil +} + +func (m *Snapshot) GetReadyToUse() bool { + if m != nil { + return m.ReadyToUse + } + return false +} + +type DeleteSnapshotRequest struct { + // The ID of the snapshot to be deleted. + // This field is REQUIRED. + SnapshotId string `protobuf:"bytes,1,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete snapshot deletion request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. 
+ Secrets map[string]string `protobuf:"bytes,2,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotRequest) Reset() { *m = DeleteSnapshotRequest{} } +func (m *DeleteSnapshotRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotRequest) ProtoMessage() {} +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{33} +} + +func (m *DeleteSnapshotRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotRequest.Unmarshal(m, b) +} +func (m *DeleteSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotRequest.Marshal(b, m, deterministic) +} +func (m *DeleteSnapshotRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotRequest.Merge(m, src) +} +func (m *DeleteSnapshotRequest) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotRequest.Size(m) +} +func (m *DeleteSnapshotRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotRequest proto.InternalMessageInfo + +func (m *DeleteSnapshotRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *DeleteSnapshotRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type DeleteSnapshotResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteSnapshotResponse) Reset() { *m = DeleteSnapshotResponse{} } +func (m *DeleteSnapshotResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSnapshotResponse) ProtoMessage() {} +func (*DeleteSnapshotResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{34} +} + +func (m *DeleteSnapshotResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteSnapshotResponse.Unmarshal(m, b) +} +func (m *DeleteSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteSnapshotResponse.Marshal(b, m, deterministic) +} +func (m *DeleteSnapshotResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteSnapshotResponse.Merge(m, src) +} +func (m *DeleteSnapshotResponse) XXX_Size() int { + return xxx_messageInfo_DeleteSnapshotResponse.Size(m) +} +func (m *DeleteSnapshotResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteSnapshotResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteSnapshotResponse proto.InternalMessageInfo + +// List all snapshots on the storage system regardless of how they were +// created. +type ListSnapshotsRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListSnapshots` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. 
+ MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListSnapshots` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. + StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken,proto3" json:"starting_token,omitempty"` + // Identity information for the source volume. This field is OPTIONAL. + // It can be used to list snapshots by volume. + SourceVolumeId string `protobuf:"bytes,3,opt,name=source_volume_id,json=sourceVolumeId,proto3" json:"source_volume_id,omitempty"` + // Identity information for a specific snapshot. This field is + // OPTIONAL. It can be used to list only a specific snapshot. + // ListSnapshots will return with current snapshot information + // and will not block if the snapshot is being processed after + // it is cut. + SnapshotId string `protobuf:"bytes,4,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` + // Secrets required by plugin to complete ListSnapshot request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } +func (m *ListSnapshotsRequest) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsRequest) ProtoMessage() {} +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{35} +} + +func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsRequest.Unmarshal(m, b) +} +func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) +} +func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) +} +func (m *ListSnapshotsRequest) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsRequest.Size(m) +} +func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo + +func (m *ListSnapshotsRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListSnapshotsRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +func (m *ListSnapshotsRequest) GetSourceVolumeId() string { + if m != nil { + return m.SourceVolumeId + } + return "" +} + +func (m *ListSnapshotsRequest) GetSnapshotId() string { + if m != nil { + return m.SnapshotId + } + return "" +} + +func (m *ListSnapshotsRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +type ListSnapshotsResponse struct { + Entries []*ListSnapshotsResponse_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListSnapshots` request. 
If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListSnapshots` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } +func (m *ListSnapshotsResponse) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse) ProtoMessage() {} +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{36} +} + +func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) +} +func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) +} +func (m *ListSnapshotsResponse) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse.Size(m) +} +func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo + +func (m *ListSnapshotsResponse) GetEntries() []*ListSnapshotsResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListSnapshotsResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListSnapshotsResponse_Entry struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListSnapshotsResponse_Entry) Reset() { *m = ListSnapshotsResponse_Entry{} } +func (m *ListSnapshotsResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListSnapshotsResponse_Entry) ProtoMessage() {} +func (*ListSnapshotsResponse_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{36, 0} +} + +func (m *ListSnapshotsResponse_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Unmarshal(m, b) +} +func (m *ListSnapshotsResponse_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Marshal(b, m, deterministic) +} +func (m *ListSnapshotsResponse_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListSnapshotsResponse_Entry.Merge(m, src) +} +func (m *ListSnapshotsResponse_Entry) XXX_Size() int { + return xxx_messageInfo_ListSnapshotsResponse_Entry.Size(m) +} +func (m *ListSnapshotsResponse_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_ListSnapshotsResponse_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_ListSnapshotsResponse_Entry proto.InternalMessageInfo + +func (m *ListSnapshotsResponse_Entry) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type ControllerExpandVolumeRequest struct { + // The ID of the volume to expand. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // This allows CO to specify the capacity requirements of the volume + // after expansion. This field is REQUIRED. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // Secrets required by the plugin for expanding the volume. + // This field is OPTIONAL. + Secrets map[string]string `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is + // being used as a block device - the SP MAY set + // node_expansion_required to false in ControllerExpandVolumeResponse + // to skip invocation of NodeExpandVolume on the node by the CO. + // This is an OPTIONAL field. + VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerExpandVolumeRequest) Reset() { *m = ControllerExpandVolumeRequest{} } +func (m *ControllerExpandVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerExpandVolumeRequest) ProtoMessage() {} +func (*ControllerExpandVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{37} +} + +func (m *ControllerExpandVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerExpandVolumeRequest.Unmarshal(m, b) +} +func (m *ControllerExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerExpandVolumeRequest.Marshal(b, m, deterministic) +} +func (m *ControllerExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeRequest.Merge(m, src) +} +func (m *ControllerExpandVolumeRequest) XXX_Size() int { + return xxx_messageInfo_ControllerExpandVolumeRequest.Size(m) +} +func (m *ControllerExpandVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerExpandVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerExpandVolumeRequest proto.InternalMessageInfo + +func (m *ControllerExpandVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *ControllerExpandVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *ControllerExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +type ControllerExpandVolumeResponse struct { + // Capacity of volume after expansion. This field is REQUIRED. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + // Whether node expansion is required for the volume. When true + // the CO MUST make NodeExpandVolume RPC call on the node. This field + // is REQUIRED. 
+ NodeExpansionRequired bool `protobuf:"varint,2,opt,name=node_expansion_required,json=nodeExpansionRequired,proto3" json:"node_expansion_required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControllerExpandVolumeResponse) Reset() { *m = ControllerExpandVolumeResponse{} } +func (m *ControllerExpandVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerExpandVolumeResponse) ProtoMessage() {} +func (*ControllerExpandVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{38} +} + +func (m *ControllerExpandVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ControllerExpandVolumeResponse.Unmarshal(m, b) +} +func (m *ControllerExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ControllerExpandVolumeResponse.Marshal(b, m, deterministic) +} +func (m *ControllerExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerExpandVolumeResponse.Merge(m, src) +} +func (m *ControllerExpandVolumeResponse) XXX_Size() int { + return xxx_messageInfo_ControllerExpandVolumeResponse.Size(m) +} +func (m *ControllerExpandVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerExpandVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerExpandVolumeResponse proto.InternalMessageInfo + +func (m *ControllerExpandVolumeResponse) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *ControllerExpandVolumeResponse) GetNodeExpansionRequired() bool { + if m != nil { + return m.NodeExpansionRequired + } + return false +} + +type NodeStageVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume MAY be staged. It MUST be an + // absolute path in the root filesystem of the process serving this + // request, and MUST be a directory. The CO SHALL ensure that there + // is only one `staging_target_path` per volume. The CO SHALL ensure + // that the path is directory and that the process serving the + // request has `read` and `write` permission to that directory. The + // CO SHALL be responsible for creating the directory if it does not + // exist. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the staged volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. 
+ VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Secrets required by plugin to complete node stage volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,5,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. + VolumeContext map[string]string `protobuf:"bytes,6,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{39} +} + +func (m *NodeStageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeStageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeStageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeRequest.Merge(m, src) +} +func (m *NodeStageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeRequest.Size(m) +} +func (m *NodeStageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeRequest proto.InternalMessageInfo + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodeStageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{40} +} + +func (m *NodeStageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeStageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeStageVolumeResponse) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeStageVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeStageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStageVolumeResponse.Merge(m, src) +} +func (m *NodeStageVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeStageVolumeResponse.Size(m) +} +func (m *NodeStageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStageVolumeResponse proto.InternalMessageInfo + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was staged. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{41} +} + +func (m *NodeUnstageVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeUnstageVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeRequest.Merge(m, src) +} +func (m *NodeUnstageVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeRequest.Size(m) +} +func (m *NodeUnstageVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{42} +} + +func (m *NodeUnstageVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnstageVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnstageVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnstageVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeUnstageVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnstageVolumeResponse.Merge(m, src) +} +func (m *NodeUnstageVolumeResponse) 
XXX_Size() int { + return xxx_messageInfo_NodeUnstageVolumeResponse.Size(m) +} +func (m *NodeUnstageVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnstageVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnstageVolumeResponse proto.InternalMessageInfo + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishContext map[string]string `protobuf:"bytes,2,rep,name=publish_context,json=publishContext,proto3" json:"publish_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + // For volumes with an access type of block, the SP SHALL place the + // block device at target_path. + // For volumes with an access type of mount, the SP SHALL place the + // mounted directory at target_path. + // Creation of target_path is the responsibility of the SP. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // Indicates SP MUST publish the volume in readonly mode. + // This field is REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly,proto3" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // This field is OPTIONAL. Refer to the `Secrets Requirements` + // section on how to use this field. + Secrets map[string]string `protobuf:"bytes,7,rep,name=secrets,proto3" json:"secrets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume context as returned by CO in CreateVolumeRequest. This field + // is OPTIONAL and MUST match the volume_context of the volume + // identified by `volume_id`. 
+ VolumeContext map[string]string `protobuf:"bytes,8,rep,name=volume_context,json=volumeContext,proto3" json:"volume_context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{43} +} + +func (m *NodePublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodePublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodePublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeRequest.Merge(m, src) +} +func (m *NodePublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeRequest.Size(m) +} +func (m *NodePublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeRequest proto.InternalMessageInfo + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishContext() map[string]string { + if m != nil { + return m.PublishContext + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetSecrets() map[string]string { + if m != nil { + return m.Secrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeContext() map[string]string { + if m != nil { + return m.VolumeContext + } + return nil +} + +type NodePublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{44} +} + +func (m *NodePublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodePublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodePublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodePublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodePublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePublishVolumeResponse.Merge(m, src) +} +func (m *NodePublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodePublishVolumeResponse.Size(m) +} +func 
(m *NodePublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePublishVolumeResponse proto.InternalMessageInfo + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // The SP MUST delete the file or directory it created at this path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath,proto3" json:"target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{45} +} + +func (m *NodeUnpublishVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeUnpublishVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeRequest.Merge(m, src) +} +func (m *NodeUnpublishVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeRequest.Size(m) +} +func (m *NodeUnpublishVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeRequest proto.InternalMessageInfo + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{46} +} + +func (m *NodeUnpublishVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Unmarshal(m, b) +} +func (m *NodeUnpublishVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeUnpublishVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnpublishVolumeResponse.Merge(m, src) +} +func (m *NodeUnpublishVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeUnpublishVolumeResponse.Size(m) +} +func (m *NodeUnpublishVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnpublishVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnpublishVolumeResponse proto.InternalMessageInfo + 
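+// NOTE: the two helpers below are illustrative sketches added for review;
+// they are NOT part of the protoc-generated CSI bindings. They only show how
+// a CO-side caller might fill in the request messages declared in this file.
+// The function names, paths and page size are hypothetical, and the actual
+// RPCs would go through the generated Node/Controller gRPC clients declared
+// later in this file.
+
+// exampleNodeStagePublishRequests builds the NodeStageVolume/NodePublishVolume
+// request pair for one volume, reusing the staging path in both messages as
+// the NodePublishVolume comments above describe.
+func exampleNodeStagePublishRequests(volumeID string, volCap *VolumeCapability) (*NodeStageVolumeRequest, *NodePublishVolumeRequest) {
+	stage := &NodeStageVolumeRequest{
+		VolumeId:          volumeID,
+		StagingTargetPath: "/csi/staging/" + volumeID, // hypothetical node-global path
+		VolumeCapability:  volCap,
+	}
+	publish := &NodePublishVolumeRequest{
+		VolumeId:          volumeID,
+		StagingTargetPath: stage.GetStagingTargetPath(),
+		TargetPath:        "/csi/published/" + volumeID, // hypothetical per-workload path
+		VolumeCapability:  volCap,
+		Readonly:          false,
+	}
+	return stage, publish
+}
+
+// exampleListAllSnapshots drains the paginated ListSnapshots call by feeding
+// each response's next_token back as the next request's starting_token; the
+// fetch callback stands in for the Controller client's ListSnapshots method.
+func exampleListAllSnapshots(fetch func(*ListSnapshotsRequest) (*ListSnapshotsResponse, error), sourceVolumeID string) ([]*Snapshot, error) {
+	var out []*Snapshot
+	token := ""
+	for {
+		resp, err := fetch(&ListSnapshotsRequest{
+			MaxEntries:     100, // hypothetical page size
+			StartingToken:  token,
+			SourceVolumeId: sourceVolumeID,
+		})
+		if err != nil {
+			return nil, err
+		}
+		for _, e := range resp.GetEntries() {
+			out = append(out, e.GetSnapshot())
+		}
+		token = resp.GetNextToken()
+		if token == "" { // an empty next_token means there are no more pages
+			return out, nil
+		}
+	}
+}
+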
+type NodeGetVolumeStatsRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // It can be any valid path where volume was previously + // staged or published. + // It MUST be an absolute path in the root filesystem of + // the process serving this request. + // This is a REQUIRED field. + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsRequest) Reset() { *m = NodeGetVolumeStatsRequest{} } +func (m *NodeGetVolumeStatsRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsRequest) ProtoMessage() {} +func (*NodeGetVolumeStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{47} +} + +func (m *NodeGetVolumeStatsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetVolumeStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsRequest.Merge(m, src) +} +func (m *NodeGetVolumeStatsRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsRequest.Size(m) +} +func (m *NodeGetVolumeStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsRequest proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeGetVolumeStatsRequest) GetVolumePath() string { + if m != nil { + return m.VolumePath + } + return "" +} + +func (m *NodeGetVolumeStatsRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeGetVolumeStatsResponse struct { + // This field is OPTIONAL. 
+ Usage []*VolumeUsage `protobuf:"bytes,1,rep,name=usage,proto3" json:"usage,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetVolumeStatsResponse) Reset() { *m = NodeGetVolumeStatsResponse{} } +func (m *NodeGetVolumeStatsResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetVolumeStatsResponse) ProtoMessage() {} +func (*NodeGetVolumeStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{48} +} + +func (m *NodeGetVolumeStatsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Unmarshal(m, b) +} +func (m *NodeGetVolumeStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetVolumeStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetVolumeStatsResponse.Merge(m, src) +} +func (m *NodeGetVolumeStatsResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetVolumeStatsResponse.Size(m) +} +func (m *NodeGetVolumeStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetVolumeStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetVolumeStatsResponse proto.InternalMessageInfo + +func (m *NodeGetVolumeStatsResponse) GetUsage() []*VolumeUsage { + if m != nil { + return m.Usage + } + return nil +} + +type VolumeUsage struct { + // The available capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Available int64 `protobuf:"varint,1,opt,name=available,proto3" json:"available,omitempty"` + // The total capacity in specified Unit. This field is REQUIRED. + // The value of this field MUST NOT be negative. + Total int64 `protobuf:"varint,2,opt,name=total,proto3" json:"total,omitempty"` + // The used capacity in specified Unit. This field is OPTIONAL. + // The value of this field MUST NOT be negative. + Used int64 `protobuf:"varint,3,opt,name=used,proto3" json:"used,omitempty"` + // Units by which values are measured. This field is REQUIRED. 
+ Unit VolumeUsage_Unit `protobuf:"varint,4,opt,name=unit,proto3,enum=csi.v1.VolumeUsage_Unit" json:"unit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VolumeUsage) Reset() { *m = VolumeUsage{} } +func (m *VolumeUsage) String() string { return proto.CompactTextString(m) } +func (*VolumeUsage) ProtoMessage() {} +func (*VolumeUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{49} +} + +func (m *VolumeUsage) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VolumeUsage.Unmarshal(m, b) +} +func (m *VolumeUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VolumeUsage.Marshal(b, m, deterministic) +} +func (m *VolumeUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeUsage.Merge(m, src) +} +func (m *VolumeUsage) XXX_Size() int { + return xxx_messageInfo_VolumeUsage.Size(m) +} +func (m *VolumeUsage) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeUsage proto.InternalMessageInfo + +func (m *VolumeUsage) GetAvailable() int64 { + if m != nil { + return m.Available + } + return 0 +} + +func (m *VolumeUsage) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *VolumeUsage) GetUsed() int64 { + if m != nil { + return m.Used + } + return 0 +} + +func (m *VolumeUsage) GetUnit() VolumeUsage_Unit { + if m != nil { + return m.Unit + } + return VolumeUsage_UNKNOWN +} + +type NodeGetCapabilitiesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{50} +} + +func (m *NodeGetCapabilitiesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetCapabilitiesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesRequest.Merge(m, src) +} +func (m *NodeGetCapabilitiesRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesRequest.Size(m) +} +func (m *NodeGetCapabilitiesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesRequest proto.InternalMessageInfo + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. 
+ Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities,proto3" json:"capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{51} +} + +func (m *NodeGetCapabilitiesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Unmarshal(m, b) +} +func (m *NodeGetCapabilitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetCapabilitiesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetCapabilitiesResponse.Merge(m, src) +} +func (m *NodeGetCapabilitiesResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetCapabilitiesResponse.Size(m) +} +func (m *NodeGetCapabilitiesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetCapabilitiesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetCapabilitiesResponse proto.InternalMessageInfo + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. +type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52} +} + +func (m *NodeServiceCapability) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability.Unmarshal(m, b) +} +func (m *NodeServiceCapability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability.Marshal(b, m, deterministic) +} +func (m *NodeServiceCapability) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability.Merge(m, src) +} +func (m *NodeServiceCapability) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability.Size(m) +} +func (m *NodeServiceCapability) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability proto.InternalMessageInfo + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,proto3,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofWrappers is for the internal use of 
the proto package. +func (*NodeServiceCapability) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,proto3,enum=csi.v1.NodeServiceCapability_RPC_Type" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{52, 0} +} + +func (m *NodeServiceCapability_RPC) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeServiceCapability_RPC.Unmarshal(m, b) +} +func (m *NodeServiceCapability_RPC) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeServiceCapability_RPC.Marshal(b, m, deterministic) +} +func (m *NodeServiceCapability_RPC) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeServiceCapability_RPC.Merge(m, src) +} +func (m *NodeServiceCapability_RPC) XXX_Size() int { + return xxx_messageInfo_NodeServiceCapability_RPC.Size(m) +} +func (m *NodeServiceCapability_RPC) XXX_DiscardUnknown() { + xxx_messageInfo_NodeServiceCapability_RPC.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeServiceCapability_RPC proto.InternalMessageInfo + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +type NodeGetInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoRequest) Reset() { *m = NodeGetInfoRequest{} } +func (m *NodeGetInfoRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoRequest) ProtoMessage() {} +func (*NodeGetInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{53} +} + +func (m *NodeGetInfoRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoRequest.Unmarshal(m, b) +} +func (m *NodeGetInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoRequest.Marshal(b, m, deterministic) +} +func (m *NodeGetInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoRequest.Merge(m, src) +} +func (m *NodeGetInfoRequest) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoRequest.Size(m) +} +func (m *NodeGetInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoRequest proto.InternalMessageInfo + +type NodeGetInfoResponse struct { + // The identifier of the node as understood by the SP. + // This field is REQUIRED. + // This field MUST contain enough information to uniquely identify + // this specific node vs all other nodes supported by this plugin. + // This field SHALL be used by the CO in subsequent calls, including + // `ControllerPublishVolume`, to refer to this node. + // The SP is NOT responsible for global uniqueness of node_id across + // multiple SPs. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Maximum number of volumes that controller can publish to the node. 
+ // If value is not set or zero CO SHALL decide how many volumes of + // this type can be published by the controller to the node. The + // plugin MUST NOT set negative values here. + // This field is OPTIONAL. + MaxVolumesPerNode int64 `protobuf:"varint,2,opt,name=max_volumes_per_node,json=maxVolumesPerNode,proto3" json:"max_volumes_per_node,omitempty"` + // Specifies where (regions, zones, racks, etc.) the node is + // accessible from. + // A plugin that returns this field MUST also set the + // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability. + // COs MAY use this information along with the topology information + // returned in CreateVolumeResponse to ensure that a given volume is + // accessible from a given node when scheduling workloads. + // This field is OPTIONAL. If it is not specified, the CO MAY assume + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2". + AccessibleTopology *Topology `protobuf:"bytes,3,opt,name=accessible_topology,json=accessibleTopology,proto3" json:"accessible_topology,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeGetInfoResponse) Reset() { *m = NodeGetInfoResponse{} } +func (m *NodeGetInfoResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetInfoResponse) ProtoMessage() {} +func (*NodeGetInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{54} +} + +func (m *NodeGetInfoResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeGetInfoResponse.Unmarshal(m, b) +} +func (m *NodeGetInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeGetInfoResponse.Marshal(b, m, deterministic) +} +func (m *NodeGetInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeGetInfoResponse.Merge(m, src) +} +func (m *NodeGetInfoResponse) XXX_Size() int { + return xxx_messageInfo_NodeGetInfoResponse.Size(m) +} +func (m *NodeGetInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeGetInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeGetInfoResponse proto.InternalMessageInfo + +func (m *NodeGetInfoResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *NodeGetInfoResponse) GetMaxVolumesPerNode() int64 { + if m != nil { + return m.MaxVolumesPerNode + } + return 0 +} + +func (m *NodeGetInfoResponse) GetAccessibleTopology() *Topology { + if m != nil { + return m.AccessibleTopology + } + return nil +} + +type NodeExpandVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId,proto3" json:"volume_id,omitempty"` + // The path on which volume is available. This field is REQUIRED. + VolumePath string `protobuf:"bytes,2,opt,name=volume_path,json=volumePath,proto3" json:"volume_path,omitempty"` + // This allows CO to specify the capacity requirements of the volume + // after expansion. If capacity_range is omitted then a plugin MAY + // inspect the file system of the volume to determine the maximum + // capacity to which the volume can be expanded. In such cases a + // plugin MAY expand the volume to its maximum capacity. 
+ // This field is OPTIONAL. + CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange,proto3" json:"capacity_range,omitempty"` + // The path where the volume is staged, if the plugin has the + // STAGE_UNSTAGE_VOLUME capability, otherwise empty. + // If not empty, it MUST be an absolute path in the root + // filesystem of the process serving this request. + // This field is OPTIONAL. + StagingTargetPath string `protobuf:"bytes,4,opt,name=staging_target_path,json=stagingTargetPath,proto3" json:"staging_target_path,omitempty"` + // Volume capability describing how the CO intends to use this volume. + // This allows SP to determine if volume is being used as a block + // device or mounted file system. For example - if volume is being + // used as a block device the SP MAY choose to skip expanding the + // filesystem in NodeExpandVolume implementation but still perform + // rest of the housekeeping needed for expanding the volume. If + // volume_capability is omitted the SP MAY determine + // access_type from given volume_path for the volume and perform + // node expansion. This is an OPTIONAL field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeExpandVolumeRequest) Reset() { *m = NodeExpandVolumeRequest{} } +func (m *NodeExpandVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeExpandVolumeRequest) ProtoMessage() {} +func (*NodeExpandVolumeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{55} +} + +func (m *NodeExpandVolumeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeExpandVolumeRequest.Unmarshal(m, b) +} +func (m *NodeExpandVolumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeExpandVolumeRequest.Marshal(b, m, deterministic) +} +func (m *NodeExpandVolumeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeRequest.Merge(m, src) +} +func (m *NodeExpandVolumeRequest) XXX_Size() int { + return xxx_messageInfo_NodeExpandVolumeRequest.Size(m) +} +func (m *NodeExpandVolumeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeExpandVolumeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeExpandVolumeRequest proto.InternalMessageInfo + +func (m *NodeExpandVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetVolumePath() string { + if m != nil { + return m.VolumePath + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *NodeExpandVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeExpandVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +type NodeExpandVolumeResponse struct { + // The capacity of the volume in bytes. This field is OPTIONAL. 
+ CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes,proto3" json:"capacity_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeExpandVolumeResponse) Reset() { *m = NodeExpandVolumeResponse{} } +func (m *NodeExpandVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeExpandVolumeResponse) ProtoMessage() {} +func (*NodeExpandVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9cdb00adce470e01, []int{56} +} + +func (m *NodeExpandVolumeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeExpandVolumeResponse.Unmarshal(m, b) +} +func (m *NodeExpandVolumeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeExpandVolumeResponse.Marshal(b, m, deterministic) +} +func (m *NodeExpandVolumeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeExpandVolumeResponse.Merge(m, src) +} +func (m *NodeExpandVolumeResponse) XXX_Size() int { + return xxx_messageInfo_NodeExpandVolumeResponse.Size(m) +} +func (m *NodeExpandVolumeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeExpandVolumeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeExpandVolumeResponse proto.InternalMessageInfo + +func (m *NodeExpandVolumeResponse) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +var E_CsiSecret = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 1059, + Name: "csi.v1.csi_secret", + Tag: "varint,1059,opt,name=csi_secret", + Filename: "github.com/container-storage-interface/spec/csi.proto", +} + +func init() { + proto.RegisterEnum("csi.v1.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v1.PluginCapability_VolumeExpansion_Type", PluginCapability_VolumeExpansion_Type_name, PluginCapability_VolumeExpansion_Type_value) + proto.RegisterEnum("csi.v1.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v1.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v1.VolumeUsage_Unit", VolumeUsage_Unit_name, VolumeUsage_Unit_value) + proto.RegisterEnum("csi.v1.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) + proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.v1.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v1.GetPluginInfoResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetPluginInfoResponse.ManifestEntry") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v1.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v1.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v1.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v1.PluginCapability.Service") + proto.RegisterType((*PluginCapability_VolumeExpansion)(nil), "csi.v1.PluginCapability.VolumeExpansion") + proto.RegisterType((*ProbeRequest)(nil), "csi.v1.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v1.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v1.CreateVolumeRequest") + 
proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateVolumeRequest.SecretsEntry") + proto.RegisterType((*VolumeContentSource)(nil), "csi.v1.VolumeContentSource") + proto.RegisterType((*VolumeContentSource_SnapshotSource)(nil), "csi.v1.VolumeContentSource.SnapshotSource") + proto.RegisterType((*VolumeContentSource_VolumeSource)(nil), "csi.v1.VolumeContentSource.VolumeSource") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v1.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v1.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v1.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v1.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v1.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v1.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v1.Volume") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Volume.VolumeContextEntry") + proto.RegisterType((*TopologyRequirement)(nil), "csi.v1.TopologyRequirement") + proto.RegisterType((*Topology)(nil), "csi.v1.Topology") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.Topology.SegmentsEntry") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v1.DeleteVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteVolumeRequest.SecretsEntry") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v1.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v1.ControllerPublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v1.ControllerPublishVolumeResponse") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerPublishVolumeResponse.PublishContextEntry") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v1.ControllerUnpublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerUnpublishVolumeRequest.SecretsEntry") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v1.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesRequest.VolumeContextEntry") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse_Confirmed)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ValidateVolumeCapabilitiesResponse.Confirmed.VolumeContextEntry") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v1.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v1.ListVolumesResponse") + 
proto.RegisterType((*ListVolumesResponse_VolumeStatus)(nil), "csi.v1.ListVolumesResponse.VolumeStatus") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v1.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v1.GetCapacityRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.GetCapacityRequest.ParametersEntry") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v1.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v1.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v1.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v1.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v1.ControllerServiceCapability.RPC") + proto.RegisterType((*CreateSnapshotRequest)(nil), "csi.v1.CreateSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.ParametersEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.CreateSnapshotRequest.SecretsEntry") + proto.RegisterType((*CreateSnapshotResponse)(nil), "csi.v1.CreateSnapshotResponse") + proto.RegisterType((*Snapshot)(nil), "csi.v1.Snapshot") + proto.RegisterType((*DeleteSnapshotRequest)(nil), "csi.v1.DeleteSnapshotRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.DeleteSnapshotRequest.SecretsEntry") + proto.RegisterType((*DeleteSnapshotResponse)(nil), "csi.v1.DeleteSnapshotResponse") + proto.RegisterType((*ListSnapshotsRequest)(nil), "csi.v1.ListSnapshotsRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ListSnapshotsRequest.SecretsEntry") + proto.RegisterType((*ListSnapshotsResponse)(nil), "csi.v1.ListSnapshotsResponse") + proto.RegisterType((*ListSnapshotsResponse_Entry)(nil), "csi.v1.ListSnapshotsResponse.Entry") + proto.RegisterType((*ControllerExpandVolumeRequest)(nil), "csi.v1.ControllerExpandVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.ControllerExpandVolumeRequest.SecretsEntry") + proto.RegisterType((*ControllerExpandVolumeResponse)(nil), "csi.v1.ControllerExpandVolumeResponse") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v1.NodeStageVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodeStageVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v1.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v1.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v1.NodeUnstageVolumeResponse") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v1.NodePublishVolumeRequest") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.PublishContextEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.SecretsEntry") + proto.RegisterMapType((map[string]string)(nil), "csi.v1.NodePublishVolumeRequest.VolumeContextEntry") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v1.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v1.NodeUnpublishVolumeRequest") + proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v1.NodeUnpublishVolumeResponse") + 
proto.RegisterType((*NodeGetVolumeStatsRequest)(nil), "csi.v1.NodeGetVolumeStatsRequest") + proto.RegisterType((*NodeGetVolumeStatsResponse)(nil), "csi.v1.NodeGetVolumeStatsResponse") + proto.RegisterType((*VolumeUsage)(nil), "csi.v1.VolumeUsage") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v1.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v1.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v1.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v1.NodeServiceCapability.RPC") + proto.RegisterType((*NodeGetInfoRequest)(nil), "csi.v1.NodeGetInfoRequest") + proto.RegisterType((*NodeGetInfoResponse)(nil), "csi.v1.NodeGetInfoResponse") + proto.RegisterType((*NodeExpandVolumeRequest)(nil), "csi.v1.NodeExpandVolumeRequest") + proto.RegisterType((*NodeExpandVolumeResponse)(nil), "csi.v1.NodeExpandVolumeResponse") + proto.RegisterExtension(E_CsiSecret) +} + +func init() { + proto.RegisterFile("github.com/container-storage-interface/spec/csi.proto", fileDescriptor_9cdb00adce470e01) +} + +var fileDescriptor_9cdb00adce470e01 = []byte{ + // 3366 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4d, 0x70, 0xdb, 0xc6, + 0xf5, 0x27, 0xf8, 0x25, 0xea, 0xe9, 0xc3, 0xf4, 0xea, 0xc3, 0x34, 0x24, 0xd9, 0x32, 0x1c, 0x3b, + 0xb2, 0x63, 0xd3, 0xff, 0x28, 0x71, 0xe6, 0x1f, 0x5b, 0x69, 0x43, 0x51, 0xb4, 0xc4, 0x98, 0xa6, + 0x14, 0x90, 0x92, 0x63, 0xb7, 0x19, 0x04, 0x22, 0x57, 0x34, 0x26, 0x24, 0xc0, 0x00, 0xa0, 0x2a, + 0xf5, 0xd2, 0x99, 0xf6, 0xd4, 0x69, 0xef, 0x6d, 0x4f, 0x9d, 0x49, 0x6f, 0x6d, 0x33, 0x39, 0x75, + 0x7a, 0xec, 0x4c, 0x0f, 0x3d, 0xf4, 0xd0, 0xe9, 0xad, 0x9d, 0x5e, 0x72, 0xed, 0x64, 0xda, 0x99, + 0x4c, 0x8f, 0x9d, 0x1e, 0x3a, 0xc0, 0x2e, 0x40, 0x2c, 0x08, 0x80, 0xa4, 0x65, 0x4f, 0x0e, 0x3d, + 0x49, 0x7c, 0xfb, 0xf6, 0xed, 0xdb, 0xb7, 0xef, 0xbd, 0x7d, 0xef, 0xb7, 0x80, 0xbb, 0x2d, 0xc5, + 0x7c, 0xd6, 0x3b, 0xcc, 0x37, 0xb4, 0xce, 0x9d, 0x86, 0xa6, 0x9a, 0xb2, 0xa2, 0x62, 0xfd, 0xb6, + 0x61, 0x6a, 0xba, 0xdc, 0xc2, 0xb7, 0x15, 0xd5, 0xc4, 0xfa, 0x91, 0xdc, 0xc0, 0x77, 0x8c, 0x2e, + 0x6e, 0xdc, 0x69, 0x18, 0x4a, 0xbe, 0xab, 0x6b, 0xa6, 0x86, 0xd2, 0xd6, 0xbf, 0xc7, 0xaf, 0xf3, + 0xab, 0x2d, 0x4d, 0x6b, 0xb5, 0xf1, 0x1d, 0x9b, 0x7a, 0xd8, 0x3b, 0xba, 0xd3, 0xc4, 0x46, 0x43, + 0x57, 0xba, 0xa6, 0xa6, 0x13, 0x4e, 0xfe, 0xb2, 0x9f, 0xc3, 0x54, 0x3a, 0xd8, 0x30, 0xe5, 0x4e, + 0x97, 0x32, 0x5c, 0xf2, 0x33, 0x7c, 0x47, 0x97, 0xbb, 0x5d, 0xac, 0x1b, 0x64, 0x5c, 0x58, 0x84, + 0xf9, 0x6d, 0x6c, 0xee, 0xb5, 0x7b, 0x2d, 0x45, 0x2d, 0xab, 0x47, 0x9a, 0x88, 0x3f, 0xe9, 0x61, + 0xc3, 0x14, 0xfe, 0xca, 0xc1, 0x82, 0x6f, 0xc0, 0xe8, 0x6a, 0xaa, 0x81, 0x11, 0x82, 0xa4, 0x2a, + 0x77, 0x70, 0x8e, 0x5b, 0xe5, 0xd6, 0x26, 0x45, 0xfb, 0x7f, 0x74, 0x0d, 0x66, 0x8f, 0xb1, 0xda, + 0xd4, 0x74, 0xe9, 0x18, 0xeb, 0x86, 0xa2, 0xa9, 0xb9, 0xb8, 0x3d, 0x3a, 0x43, 0xa8, 0x07, 0x84, + 0x88, 0xb6, 0x21, 0xd3, 0x91, 0x55, 0xe5, 0x08, 0x1b, 0x66, 0x2e, 0xb1, 0x9a, 0x58, 0x9b, 0x5a, + 0x7f, 0x2d, 0x4f, 0xb6, 0x9a, 0x0f, 0x5c, 0x2b, 0xff, 0x88, 0x72, 0x97, 0x54, 0x53, 0x3f, 0x15, + 0xdd, 0xc9, 0xfc, 0x7d, 0x98, 0x61, 0x86, 0x50, 0x16, 0x12, 0x1f, 0xe3, 0x53, 0xaa, 0x93, 0xf5, + 0x2f, 0x9a, 0x87, 0xd4, 0xb1, 0xdc, 0xee, 0x61, 0xaa, 0x09, 0xf9, 0x71, 0x2f, 0xfe, 0xff, 0x9c, + 0x70, 0x09, 0x96, 0xdd, 0xd5, 0x8a, 0x72, 0x57, 0x3e, 0x54, 0xda, 0x8a, 0xa9, 0x60, 0xc3, 0xd9, + 0xfa, 0x87, 0xb0, 0x12, 0x32, 0x4e, 0x2d, 0xb0, 0x01, 0xd3, 0x0d, 0x0f, 0x3d, 
0xc7, 0xd9, 0x5b, + 0xc9, 0x39, 0x5b, 0xf1, 0xcd, 0x3c, 0x15, 0x19, 0x6e, 0xe1, 0x4f, 0x09, 0xc8, 0xfa, 0x59, 0xd0, + 0x06, 0x4c, 0x18, 0x58, 0x3f, 0x56, 0x1a, 0xc4, 0xae, 0x53, 0xeb, 0xab, 0x61, 0xd2, 0xf2, 0x35, + 0xc2, 0xb7, 0x13, 0x13, 0x9d, 0x29, 0x68, 0x1f, 0xb2, 0xc7, 0x5a, 0xbb, 0xd7, 0xc1, 0x12, 0x3e, + 0xe9, 0xca, 0xaa, 0x7b, 0x00, 0x53, 0xeb, 0x6b, 0xa1, 0x62, 0x0e, 0xec, 0x09, 0x25, 0x87, 0x7f, + 0x27, 0x26, 0x9e, 0x3b, 0x66, 0x49, 0xfc, 0x4f, 0x38, 0x98, 0xa0, 0xab, 0xa1, 0xb7, 0x21, 0x69, + 0x9e, 0x76, 0x89, 0x76, 0xb3, 0xeb, 0xd7, 0x86, 0x69, 0x97, 0xaf, 0x9f, 0x76, 0xb1, 0x68, 0x4f, + 0x11, 0xde, 0x87, 0xa4, 0xf5, 0x0b, 0x4d, 0xc1, 0xc4, 0x7e, 0xf5, 0x61, 0x75, 0xf7, 0x71, 0x35, + 0x1b, 0x43, 0x8b, 0x80, 0x8a, 0xbb, 0xd5, 0xba, 0xb8, 0x5b, 0xa9, 0x94, 0x44, 0xa9, 0x56, 0x12, + 0x0f, 0xca, 0xc5, 0x52, 0x96, 0x43, 0xaf, 0xc0, 0xea, 0xc1, 0x6e, 0x65, 0xff, 0x51, 0x49, 0x2a, + 0x14, 0x8b, 0xa5, 0x5a, 0xad, 0xbc, 0x59, 0xae, 0x94, 0xeb, 0x4f, 0xa4, 0xe2, 0x6e, 0xb5, 0x56, + 0x17, 0x0b, 0xe5, 0x6a, 0xbd, 0x96, 0x8d, 0xf3, 0xdf, 0xe7, 0xe0, 0x9c, 0x6f, 0x03, 0xa8, 0xc0, + 0x68, 0x78, 0x7b, 0xd4, 0x8d, 0x7b, 0x35, 0xbd, 0x15, 0xa4, 0x29, 0x40, 0x7a, 0xb7, 0x5a, 0x29, + 0x57, 0x2d, 0xed, 0xa6, 0x60, 0x62, 0xf7, 0xc1, 0x03, 0xfb, 0x47, 0x7c, 0x33, 0x4d, 0x16, 0x14, + 0x66, 0x61, 0x7a, 0x4f, 0xd7, 0x0e, 0xb1, 0xe3, 0x3f, 0x05, 0x98, 0xa1, 0xbf, 0xa9, 0xbf, 0xfc, + 0x1f, 0xa4, 0x74, 0x2c, 0x37, 0x4f, 0xe9, 0xd1, 0xf2, 0x79, 0x12, 0x93, 0x79, 0x27, 0x26, 0xf3, + 0x9b, 0x9a, 0xd6, 0x3e, 0xb0, 0xfc, 0x53, 0x24, 0x8c, 0xc2, 0x57, 0x49, 0x98, 0x2b, 0xea, 0x58, + 0x36, 0x31, 0xd1, 0x96, 0x8a, 0x0e, 0x8c, 0xbd, 0x0d, 0x98, 0xb5, 0xfc, 0xab, 0xa1, 0x98, 0xa7, + 0x92, 0x2e, 0xab, 0x2d, 0x4c, 0x8f, 0x7e, 0xc1, 0xb1, 0x40, 0x91, 0x8e, 0x8a, 0xd6, 0xa0, 0x38, + 0xd3, 0xf0, 0xfe, 0x44, 0x65, 0x98, 0xa3, 0xae, 0xc3, 0xb8, 0x74, 0x82, 0x75, 0x69, 0xa2, 0x85, + 0xc7, 0xa5, 0xd1, 0x31, 0x4b, 0x51, 0xb0, 0x81, 0x1e, 0x02, 0x74, 0x65, 0x5d, 0xee, 0x60, 0x13, + 0xeb, 0x46, 0x2e, 0xc9, 0xc6, 0x77, 0xc0, 0x6e, 0xf2, 0x7b, 0x2e, 0x37, 0x89, 0x6f, 0xcf, 0x74, + 0xb4, 0x6d, 0x05, 0x44, 0x43, 0xc7, 0xa6, 0x91, 0x4b, 0xd9, 0x92, 0xd6, 0xa2, 0x24, 0xd5, 0x08, + 0xab, 0x2d, 0x66, 0x33, 0xf1, 0xd3, 0x4d, 0x4e, 0x74, 0x66, 0xa3, 0x5d, 0x58, 0x70, 0x36, 0xa8, + 0xa9, 0x26, 0x56, 0x4d, 0xc9, 0xd0, 0x7a, 0x7a, 0x03, 0xe7, 0xd2, 0xb6, 0x95, 0x96, 0x7c, 0x5b, + 0x24, 0x3c, 0x35, 0x9b, 0x45, 0xa4, 0xa6, 0x61, 0x88, 0xe8, 0x29, 0xf0, 0x72, 0xa3, 0x81, 0x0d, + 0x43, 0x21, 0xb6, 0x90, 0x74, 0xfc, 0x49, 0x4f, 0xd1, 0x71, 0x07, 0xab, 0xa6, 0x91, 0x9b, 0x60, + 0xa5, 0xd6, 0xb5, 0xae, 0xd6, 0xd6, 0x5a, 0xa7, 0x62, 0x9f, 0x47, 0xbc, 0xc8, 0x4c, 0xf7, 0x8c, + 0x18, 0xfc, 0x3b, 0x70, 0xce, 0x67, 0x94, 0x71, 0x32, 0x1b, 0x7f, 0x0f, 0xa6, 0xbd, 0x96, 0x18, + 0x2b, 0x2b, 0xfe, 0x28, 0x0e, 0x73, 0x01, 0x36, 0x40, 0x3b, 0x90, 0x31, 0x54, 0xb9, 0x6b, 0x3c, + 0xd3, 0x4c, 0xea, 0xbf, 0x37, 0x23, 0x4c, 0x96, 0xaf, 0x51, 0x5e, 0xf2, 0x73, 0x27, 0x26, 0xba, + 0xb3, 0xd1, 0x26, 0xa4, 0x89, 0x3d, 0xfd, 0xb9, 0x29, 0x48, 0x0e, 0xa1, 0xb9, 0x52, 0xe8, 0x4c, + 0xfe, 0x75, 0x98, 0x65, 0x57, 0x40, 0x97, 0x61, 0xca, 0x59, 0x41, 0x52, 0x9a, 0x74, 0xaf, 0xe0, + 0x90, 0xca, 0x4d, 0xfe, 0x35, 0x98, 0xf6, 0x0a, 0x43, 0x4b, 0x30, 0x49, 0x1d, 0xc2, 0x65, 0xcf, + 0x10, 0x42, 0xb9, 0xe9, 0xc6, 0xf4, 0x37, 0x60, 0x9e, 0xf5, 0x33, 0x1a, 0xca, 0xd7, 0xdd, 0x3d, + 0x10, 0x5b, 0xcc, 0xb2, 0x7b, 0x70, 0xf4, 0x14, 0x7e, 0x99, 0x84, 0xac, 0x3f, 0x68, 0xd0, 0x06, + 0xa4, 0x0e, 0xdb, 0x5a, 0xe3, 0x63, 0x3a, 0xf7, 0x95, 0xb0, 0xe8, 0xca, 0x6f, 0x5a, 0x5c, 0x84, + 0xba, 
0x13, 0x13, 0xc9, 0x24, 0x6b, 0x76, 0x47, 0xeb, 0xa9, 0x26, 0xb5, 0x5e, 0xf8, 0xec, 0x47, + 0x16, 0x57, 0x7f, 0xb6, 0x3d, 0x09, 0x6d, 0xc1, 0x14, 0x71, 0x3b, 0xa9, 0xa3, 0x35, 0x71, 0x2e, + 0x61, 0xcb, 0xb8, 0x1a, 0x2a, 0xa3, 0x60, 0xf3, 0x3e, 0xd2, 0x9a, 0x58, 0x04, 0xd9, 0xfd, 0x9f, + 0x9f, 0x81, 0x29, 0x8f, 0x6e, 0xfc, 0x36, 0x4c, 0x79, 0x16, 0x43, 0x17, 0x60, 0xe2, 0xc8, 0x90, + 0xdc, 0x24, 0x3c, 0x29, 0xa6, 0x8f, 0x0c, 0x3b, 0x9f, 0x5e, 0x86, 0x29, 0x5b, 0x0b, 0xe9, 0xa8, + 0x2d, 0xb7, 0x8c, 0x5c, 0x7c, 0x35, 0x61, 0x9d, 0x91, 0x4d, 0x7a, 0x60, 0x51, 0xf8, 0x7f, 0x70, + 0x00, 0xfd, 0x25, 0xd1, 0x06, 0x24, 0x6d, 0x2d, 0x49, 0x2a, 0x5f, 0x1b, 0x41, 0xcb, 0xbc, 0xad, + 0xaa, 0x3d, 0x4b, 0xf8, 0x39, 0x07, 0x49, 0x5b, 0x8c, 0xff, 0xc2, 0xa9, 0x95, 0xab, 0xdb, 0x95, + 0x92, 0x54, 0xdd, 0xdd, 0x2a, 0x49, 0x8f, 0xc5, 0x72, 0xbd, 0x24, 0x66, 0x39, 0xb4, 0x04, 0x17, + 0xbc, 0x74, 0xb1, 0x54, 0xd8, 0x2a, 0x89, 0xd2, 0x6e, 0xb5, 0xf2, 0x24, 0x1b, 0x47, 0x3c, 0x2c, + 0x3e, 0xda, 0xaf, 0xd4, 0xcb, 0x83, 0x63, 0x09, 0xb4, 0x0c, 0x39, 0xcf, 0x18, 0x95, 0x41, 0xc5, + 0x26, 0x2d, 0xb1, 0x9e, 0x51, 0xf2, 0x2f, 0x1d, 0x4c, 0x6d, 0xce, 0xb8, 0x87, 0x61, 0x3b, 0xdb, + 0x63, 0x98, 0x61, 0x72, 0xb4, 0x55, 0x4e, 0xd1, 0xa4, 0xd2, 0x94, 0x0e, 0x4f, 0x4d, 0xbb, 0xc4, + 0xe0, 0xd6, 0x12, 0xe2, 0x8c, 0x43, 0xdd, 0xb4, 0x88, 0x96, 0x59, 0xdb, 0x4a, 0x47, 0x31, 0x29, + 0x4f, 0xdc, 0xe6, 0x01, 0x9b, 0x64, 0x33, 0x08, 0x5f, 0xc4, 0x21, 0x4d, 0xcf, 0xe6, 0x9a, 0xe7, + 0x96, 0x60, 0x44, 0x3a, 0x54, 0x22, 0x92, 0x09, 0x8e, 0x38, 0x1b, 0x1c, 0x68, 0x07, 0x66, 0xbd, + 0xa9, 0xf4, 0xc4, 0x29, 0xe2, 0xae, 0xb0, 0x07, 0xe4, 0x8d, 0xe7, 0x13, 0x5a, 0xba, 0xcd, 0x1c, + 0x7b, 0x69, 0x68, 0x13, 0x66, 0x7d, 0xd9, 0x38, 0x39, 0x3c, 0x1b, 0xcf, 0x34, 0x98, 0xc4, 0x54, + 0x80, 0x39, 0x27, 0x91, 0xb6, 0xb1, 0x64, 0xd2, 0x44, 0x4b, 0x6f, 0x8b, 0xec, 0x40, 0x02, 0x46, + 0x7d, 0x66, 0x87, 0xc6, 0xbf, 0x0b, 0x68, 0x50, 0xd7, 0xb1, 0xb2, 0x66, 0x0f, 0xe6, 0x02, 0x52, + 0x3c, 0xca, 0xc3, 0xa4, 0x7d, 0x54, 0x86, 0x62, 0x62, 0x5a, 0x1e, 0x0e, 0x6a, 0xd4, 0x67, 0xb1, + 0xf8, 0xbb, 0x3a, 0x3e, 0xc2, 0xba, 0x8e, 0x9b, 0x76, 0x78, 0x04, 0xf2, 0xbb, 0x2c, 0xc2, 0x0f, + 0x38, 0xc8, 0x38, 0x74, 0x74, 0x0f, 0x32, 0x06, 0x6e, 0x91, 0xeb, 0x87, 0xac, 0x75, 0xc9, 0x3f, + 0x37, 0x5f, 0xa3, 0x0c, 0xb4, 0x90, 0x76, 0xf8, 0xad, 0x42, 0x9a, 0x19, 0x1a, 0x6b, 0xf3, 0xbf, + 0xe5, 0x60, 0x6e, 0x0b, 0xb7, 0xb1, 0xbf, 0x4a, 0x89, 0xca, 0xb0, 0xde, 0x8b, 0x3d, 0xce, 0x5e, + 0xec, 0x01, 0xa2, 0x22, 0x2e, 0xf6, 0x33, 0x5d, 0x76, 0x8b, 0x30, 0xcf, 0xae, 0x46, 0xd2, 0xbb, + 0xf0, 0xcf, 0x04, 0x5c, 0xb2, 0x7c, 0x41, 0xd7, 0xda, 0x6d, 0xac, 0xef, 0xf5, 0x0e, 0xdb, 0x8a, + 0xf1, 0x6c, 0x8c, 0xcd, 0x5d, 0x80, 0x09, 0x55, 0x6b, 0x7a, 0x82, 0x27, 0x6d, 0xfd, 0x2c, 0x37, + 0x51, 0x09, 0xce, 0xfb, 0xcb, 0xac, 0x53, 0x9a, 0x84, 0xc3, 0x8b, 0xac, 0xec, 0xb1, 0xff, 0x06, + 0xe1, 0x21, 0x63, 0x15, 0x88, 0x9a, 0xda, 0x3e, 0xb5, 0x23, 0x26, 0x23, 0xba, 0xbf, 0x91, 0xe8, + 0xaf, 0x98, 0xde, 0x70, 0x2b, 0xa6, 0xc8, 0x1d, 0x45, 0x15, 0x4f, 0x1f, 0x0d, 0x44, 0x7c, 0xda, + 0x16, 0xfd, 0xf6, 0x88, 0xa2, 0x87, 0x66, 0x82, 0xb3, 0x9c, 0xe2, 0x0b, 0x08, 0xdf, 0x3f, 0x72, + 0x70, 0x39, 0x74, 0x0b, 0xf4, 0xca, 0x6f, 0xc2, 0xb9, 0x2e, 0x19, 0x70, 0x8d, 0x40, 0xa2, 0xec, + 0xfe, 0x50, 0x23, 0xd0, 0x2e, 0x96, 0x52, 0x19, 0x33, 0xcc, 0x76, 0x19, 0x22, 0x5f, 0x80, 0xb9, + 0x00, 0xb6, 0xb1, 0x36, 0xf3, 0x25, 0x07, 0xab, 0x7d, 0x55, 0xf6, 0xd5, 0xee, 0x8b, 0x73, 0xdf, + 0x7a, 0xdf, 0xb7, 0x48, 0xca, 0xbf, 0x3b, 0xb8, 0xf7, 0xe0, 0x05, 0x5f, 0x56, 0x04, 0x5f, 0x85, + 0x2b, 0x11, 0x4b, 0xd3, 0x70, 
0xfe, 0x22, 0x09, 0x57, 0x0e, 0xe4, 0xb6, 0xd2, 0x74, 0x0b, 0xb9, + 0x80, 0x7e, 0x3f, 0xda, 0x24, 0x8d, 0x81, 0x08, 0x20, 0x59, 0x6b, 0xc3, 0x8d, 0xda, 0x61, 0xf2, + 0x47, 0xb8, 0x0e, 0x5f, 0x60, 0x13, 0xf6, 0x24, 0xa0, 0x09, 0x7b, 0x7b, 0x74, 0x5d, 0xa3, 0x5a, + 0xb2, 0x7d, 0x7f, 0x82, 0x79, 0x6b, 0x74, 0xb9, 0x11, 0x5e, 0x70, 0xe6, 0x28, 0xfe, 0x3a, 0xbb, + 0xa6, 0xdf, 0x27, 0x41, 0x88, 0xda, 0x3d, 0xcd, 0x21, 0x22, 0x4c, 0x36, 0x34, 0xf5, 0x48, 0xd1, + 0x3b, 0xb8, 0x49, 0xab, 0xff, 0x37, 0x47, 0x31, 0x1e, 0x4d, 0x20, 0x45, 0x67, 0xae, 0xd8, 0x17, + 0x83, 0x72, 0x30, 0xd1, 0xc1, 0x86, 0x21, 0xb7, 0x1c, 0xb5, 0x9c, 0x9f, 0xfc, 0x67, 0x09, 0x98, + 0x74, 0xa7, 0x20, 0x75, 0xc0, 0x83, 0x49, 0xfa, 0xda, 0x7e, 0x1e, 0x05, 0x9e, 0xdf, 0x99, 0xe3, + 0xcf, 0xe1, 0xcc, 0x4d, 0xc6, 0x99, 0x49, 0x38, 0x6c, 0x3d, 0x97, 0xda, 0x11, 0x7e, 0xfd, 0xb5, + 0x3b, 0xa0, 0xf0, 0x6d, 0x40, 0x15, 0xc5, 0xa0, 0x5d, 0x94, 0x9b, 0x96, 0xac, 0xa6, 0x49, 0x3e, + 0x91, 0xb0, 0x6a, 0xea, 0x0a, 0x2d, 0xd7, 0x53, 0x22, 0x74, 0xe4, 0x93, 0x12, 0xa1, 0x58, 0x25, + 0xbd, 0x61, 0xca, 0xba, 0xa9, 0xa8, 0x2d, 0xc9, 0xd4, 0x3e, 0xc6, 0x2e, 0xe8, 0xea, 0x50, 0xeb, + 0x16, 0x51, 0xf8, 0x34, 0x0e, 0x73, 0x8c, 0x78, 0xea, 0x93, 0xf7, 0x61, 0xa2, 0x2f, 0x9b, 0x29, + 0xe3, 0x03, 0xb8, 0xf3, 0xc4, 0x6c, 0xce, 0x0c, 0xb4, 0x02, 0xa0, 0xe2, 0x13, 0x93, 0x59, 0x77, + 0xd2, 0xa2, 0xd8, 0x6b, 0xf2, 0x1b, 0x6e, 0xcf, 0x6d, 0xca, 0x66, 0xcf, 0x40, 0xb7, 0x00, 0xd1, + 0x0c, 0x8d, 0x9b, 0x12, 0xbd, 0x62, 0xc8, 0xb2, 0x93, 0x62, 0xd6, 0x1d, 0xa9, 0xda, 0x97, 0x8d, + 0xc1, 0x7f, 0x02, 0x29, 0x62, 0xc4, 0x11, 0xbb, 0x6d, 0xf4, 0x2e, 0xa4, 0x0d, 0x7b, 0x21, 0x3f, + 0xb2, 0x10, 0xb4, 0x13, 0xaf, 0x62, 0x22, 0x9d, 0x27, 0x7c, 0x16, 0x07, 0xb4, 0x8d, 0x4d, 0xb7, + 0x0d, 0xa3, 0x67, 0x10, 0xe2, 0xcb, 0xdc, 0x73, 0xf8, 0xf2, 0x7b, 0x8c, 0x2f, 0x93, 0x68, 0xb8, + 0xe9, 0x41, 0xbf, 0x7d, 0x4b, 0x47, 0x66, 0xe2, 0x90, 0xd6, 0x87, 0xd4, 0x93, 0xa3, 0xb5, 0x3e, + 0x67, 0x74, 0xd9, 0x2d, 0x98, 0x63, 0x74, 0xa6, 0x3e, 0x75, 0x1b, 0x90, 0x7c, 0x2c, 0x2b, 0x6d, + 0xd9, 0xd2, 0xcb, 0xe9, 0x2c, 0x69, 0xa7, 0x79, 0xde, 0x1d, 0x71, 0xa6, 0x09, 0x82, 0xb7, 0x60, + 0xa1, 0xf2, 0xfc, 0x68, 0x7c, 0xdb, 0x7b, 0xd1, 0x0f, 0xf0, 0xd0, 0x75, 0xb7, 0x03, 0x11, 0xf9, + 0xab, 0x83, 0x45, 0x0a, 0x85, 0xa7, 0x43, 0xc1, 0xf9, 0x5f, 0x25, 0x60, 0x29, 0x82, 0x1b, 0xdd, + 0x87, 0x84, 0xde, 0x6d, 0x50, 0x77, 0x7c, 0x75, 0x04, 0xf9, 0x79, 0x71, 0xaf, 0xb8, 0x13, 0x13, + 0xad, 0x59, 0xfc, 0x1f, 0xe2, 0x90, 0x10, 0xf7, 0x8a, 0xe8, 0x5d, 0x06, 0xa9, 0xbe, 0x35, 0xa2, + 0x14, 0x2f, 0x50, 0xfd, 0x1f, 0x2e, 0x08, 0xa9, 0xce, 0xc1, 0x7c, 0x51, 0x2c, 0x15, 0xea, 0x25, + 0x69, 0xab, 0x54, 0x29, 0xd5, 0x4b, 0x12, 0x41, 0xd2, 0xb3, 0x1c, 0x5a, 0x86, 0xdc, 0xde, 0xfe, + 0x66, 0xa5, 0x5c, 0xdb, 0x91, 0xf6, 0xab, 0xce, 0x7f, 0x74, 0x34, 0x8e, 0xb2, 0x30, 0x5d, 0x29, + 0xd7, 0xea, 0x94, 0x50, 0xcb, 0x26, 0x2c, 0xca, 0x76, 0xa9, 0x2e, 0x15, 0x0b, 0x7b, 0x85, 0x62, + 0xb9, 0xfe, 0x24, 0x9b, 0x44, 0x3c, 0x2c, 0xb2, 0xb2, 0x6b, 0xd5, 0xc2, 0x5e, 0x6d, 0x67, 0xb7, + 0x9e, 0x4d, 0x21, 0x04, 0xb3, 0xf6, 0x7c, 0x87, 0x54, 0xcb, 0xa6, 0x2d, 0x09, 0xc5, 0xca, 0x6e, + 0xd5, 0xd5, 0x61, 0x02, 0xcd, 0x43, 0xd6, 0x59, 0x59, 0x2c, 0x15, 0xb6, 0x6c, 0x14, 0x25, 0x83, + 0xce, 0xc3, 0x4c, 0xe9, 0x83, 0xbd, 0x42, 0x75, 0xcb, 0x61, 0x9c, 0x44, 0xab, 0xb0, 0xec, 0x55, + 0x47, 0xa2, 0xb3, 0x4a, 0x5b, 0x36, 0x96, 0x52, 0xcb, 0x82, 0x8b, 0xd2, 0x7d, 0x19, 0x87, 0x05, + 0x02, 0xd3, 0x39, 0xa0, 0xa0, 0x13, 0xb8, 0x6b, 0x90, 0x25, 0xc0, 0x82, 0xe4, 0x2f, 0xed, 0x66, + 0x09, 0xfd, 0xc0, 0x29, 0xf0, 0x1c, 0x48, 0x3d, 0xee, 
0x81, 0xd4, 0xcb, 0xfe, 0x72, 0xf7, 0x26, + 0x0b, 0x3e, 0xfb, 0x56, 0x8b, 0xea, 0xa0, 0x1e, 0x05, 0xd4, 0x63, 0xb7, 0xa3, 0xa5, 0x45, 0xdd, + 0x55, 0x67, 0x69, 0x97, 0xce, 0x18, 0xf2, 0x0f, 0x60, 0xd1, 0xaf, 0x2f, 0x8d, 0xbe, 0x5b, 0x03, + 0x10, 0xb1, 0x9b, 0x83, 0x5c, 0x5e, 0x97, 0x43, 0xf8, 0x0b, 0x07, 0x19, 0x87, 0x6c, 0xdd, 0x23, + 0x86, 0xf2, 0x5d, 0xcc, 0x40, 0x52, 0x93, 0x16, 0xc5, 0x45, 0xb8, 0xbc, 0xe0, 0x6e, 0xdc, 0x0f, + 0xee, 0x06, 0x9e, 0x73, 0x22, 0xf0, 0x9c, 0xbf, 0x09, 0x33, 0x0d, 0x4b, 0x7d, 0x45, 0x53, 0x25, + 0x53, 0xe9, 0x38, 0x88, 0xd3, 0xe0, 0x63, 0x4c, 0xdd, 0x79, 0x41, 0x15, 0xa7, 0x9d, 0x09, 0x16, + 0x09, 0xad, 0xc2, 0xb4, 0xfd, 0x38, 0x23, 0x99, 0x9a, 0xd4, 0x33, 0x70, 0x2e, 0x65, 0xf7, 0xdf, + 0x60, 0xd3, 0xea, 0xda, 0xbe, 0x81, 0x85, 0xdf, 0x71, 0xb0, 0x40, 0x60, 0x05, 0xbf, 0x3b, 0x0e, + 0x03, 0xa9, 0xbd, 0x1e, 0xe7, 0xbb, 0x1a, 0x02, 0x05, 0xbe, 0xac, 0xae, 0x2a, 0x07, 0x8b, 0xfe, + 0xf5, 0x68, 0x2b, 0xf5, 0x79, 0x1c, 0xe6, 0xad, 0xdb, 0xd4, 0x19, 0x78, 0xd1, 0x65, 0xca, 0x18, + 0x27, 0xe9, 0x33, 0x66, 0x72, 0xc0, 0x98, 0x3b, 0xfe, 0x46, 0xe5, 0x86, 0xb7, 0x1e, 0xf0, 0xef, + 0xe0, 0x65, 0xd9, 0xf2, 0xd7, 0x1c, 0x2c, 0xf8, 0xd6, 0xa3, 0xf1, 0xf2, 0x8e, 0xbf, 0xf2, 0xba, + 0x1a, 0xa2, 0xdf, 0x73, 0xd5, 0x5e, 0x77, 0x9d, 0xea, 0x69, 0xbc, 0xb0, 0xfc, 0x73, 0x1c, 0x56, + 0xfa, 0x37, 0x90, 0xfd, 0x3c, 0xda, 0x1c, 0x03, 0x3a, 0x38, 0xdb, 0x2b, 0xe4, 0xfb, 0xfe, 0x84, + 0xbb, 0x3e, 0x78, 0x29, 0x06, 0xa8, 0x14, 0x95, 0x78, 0x03, 0x11, 0xb7, 0xe4, 0xb8, 0x88, 0xdb, + 0x99, 0x3c, 0xe0, 0x7b, 0x5e, 0x30, 0x91, 0x55, 0x9f, 0x7a, 0xc2, 0x88, 0xa8, 0xfc, 0x5b, 0x70, + 0xc1, 0x2e, 0x9a, 0xdd, 0xd7, 0x7d, 0xe7, 0xcd, 0x91, 0xa4, 0xc4, 0x8c, 0xb8, 0x60, 0x0d, 0xbb, + 0x4f, 0xda, 0x14, 0x89, 0x6e, 0x0a, 0x5f, 0x25, 0x61, 0xd1, 0x2a, 0xaa, 0x6b, 0xa6, 0xdc, 0x1a, + 0x07, 0xa3, 0xfd, 0xd6, 0x20, 0xe4, 0x15, 0x67, 0x8f, 0x25, 0x58, 0xea, 0x28, 0x48, 0x17, 0xca, + 0xc3, 0x9c, 0x61, 0xca, 0x2d, 0x3b, 0x1d, 0xc8, 0x7a, 0x0b, 0x9b, 0x52, 0x57, 0x36, 0x9f, 0xd1, + 0x58, 0x3f, 0x4f, 0x87, 0xea, 0xf6, 0xc8, 0x9e, 0x6c, 0x3e, 0x7b, 0x41, 0x07, 0x89, 0xde, 0xf3, + 0x27, 0x85, 0xd7, 0x86, 0xec, 0x25, 0xc2, 0xb7, 0x3e, 0x08, 0x81, 0x45, 0x5f, 0x1f, 0x22, 0x72, + 0x38, 0x1c, 0x7a, 0x76, 0x18, 0xf0, 0x6b, 0x46, 0x54, 0x2f, 0xc2, 0x85, 0x81, 0xcd, 0xd3, 0x2b, + 0xa4, 0x05, 0x39, 0x6b, 0x68, 0x5f, 0x35, 0xc6, 0x74, 0xc7, 0x10, 0x8f, 0x89, 0x87, 0x78, 0x8c, + 0xb0, 0x04, 0x17, 0x03, 0x16, 0xa2, 0x5a, 0xfc, 0x26, 0x45, 0xd4, 0x18, 0x1f, 0xdc, 0xff, 0x30, + 0x2c, 0x2a, 0xde, 0xf4, 0x1e, 0x7b, 0x20, 0x0e, 0xfe, 0x32, 0xe2, 0xe2, 0x32, 0x4c, 0x79, 0xf9, + 0xe8, 0x35, 0x68, 0x0e, 0x09, 0x9c, 0xd4, 0x99, 0xde, 0x1c, 0xd2, 0xbe, 0x37, 0x87, 0x4a, 0x3f, + 0xa8, 0x26, 0xd8, 0xd2, 0x36, 0xd4, 0x14, 0x11, 0x61, 0xf5, 0x74, 0x20, 0xac, 0x32, 0xec, 0x43, + 0x46, 0xa8, 0xd0, 0xff, 0x81, 0xc0, 0xa2, 0x4e, 0x1d, 0xf8, 0xc2, 0x20, 0x3c, 0x05, 0x9e, 0x78, + 0xfc, 0xf8, 0x98, 0xbf, 0xcf, 0x8d, 0xe2, 0x7e, 0x37, 0x12, 0x56, 0x60, 0x29, 0x50, 0x36, 0x5d, + 0xfa, 0x87, 0x1c, 0x51, 0x6c, 0x1b, 0x9b, 0x7d, 0x64, 0xc5, 0x18, 0x75, 0x69, 0x3a, 0xe8, 0x5d, + 0x9a, 0x90, 0x6c, 0x0f, 0x1e, 0x33, 0x24, 0x84, 0x6d, 0x62, 0x06, 0xbf, 0x2a, 0xf4, 0xb2, 0xbd, + 0x01, 0xa9, 0x9e, 0x0d, 0x97, 0x92, 0xa2, 0x6b, 0x8e, 0x8d, 0x81, 0x7d, 0x6b, 0x48, 0x24, 0x1c, + 0xc2, 0xe7, 0x1c, 0x4c, 0x79, 0xc8, 0x68, 0x19, 0x26, 0x5d, 0xf4, 0xc2, 0xe9, 0x52, 0x5c, 0x82, + 0x75, 0x68, 0xa6, 0x66, 0xca, 0x6d, 0xfa, 0x02, 0x4f, 0x7e, 0x58, 0x8d, 0x65, 0xcf, 0xc0, 0xa4, + 0x88, 0x4d, 0x88, 0xf6, 0xff, 0xe8, 0x16, 0x24, 0x7b, 0xaa, 0x62, 0xda, 0xc1, 
0x3a, 0xeb, 0x8f, + 0x42, 0x7b, 0xa9, 0xfc, 0xbe, 0xaa, 0x98, 0xa2, 0xcd, 0x25, 0xdc, 0x84, 0xa4, 0xf5, 0x8b, 0x6d, + 0xf2, 0x27, 0x21, 0xb5, 0xf9, 0xa4, 0x5e, 0xaa, 0x65, 0x39, 0x04, 0x90, 0x2e, 0x93, 0x96, 0x38, + 0x2e, 0x2c, 0xbb, 0x5b, 0x0f, 0x02, 0x51, 0x3e, 0x22, 0x67, 0x18, 0x06, 0x9f, 0x14, 0x02, 0xe1, + 0x93, 0x15, 0xe6, 0x36, 0x1b, 0x02, 0x9c, 0xfc, 0x8b, 0x83, 0x85, 0x40, 0x3e, 0x74, 0xd7, 0x0b, + 0x99, 0x5c, 0x89, 0x94, 0xe9, 0x05, 0x4b, 0x7e, 0xc6, 0x11, 0xb0, 0xe4, 0x1e, 0x03, 0x96, 0x5c, + 0x1f, 0x3a, 0xdf, 0x0b, 0x93, 0x1c, 0x84, 0xa0, 0x24, 0xb5, 0x7a, 0x61, 0xbb, 0x24, 0xed, 0x57, + 0xc9, 0x5f, 0x17, 0x25, 0x99, 0x87, 0xec, 0x76, 0xc9, 0xc1, 0x1d, 0xa4, 0x5a, 0xbd, 0x50, 0xaf, + 0x65, 0xe3, 0x83, 0x08, 0x45, 0xc2, 0xc5, 0x1f, 0xe6, 0x01, 0x51, 0xb3, 0x7a, 0x3f, 0x9d, 0xfd, + 0x94, 0x83, 0x39, 0x86, 0x4c, 0xad, 0xec, 0x79, 0x5d, 0xe3, 0x98, 0xd7, 0xb5, 0x3b, 0x30, 0x6f, + 0xb5, 0x50, 0xc4, 0xf1, 0x0d, 0xa9, 0x8b, 0x75, 0x1b, 0x23, 0xa5, 0xee, 0x74, 0xbe, 0x23, 0x9f, + 0x50, 0x0c, 0x73, 0x0f, 0xeb, 0x96, 0xe0, 0x17, 0x80, 0xff, 0x09, 0x3f, 0x8e, 0x93, 0x8b, 0x7a, + 0xec, 0x42, 0x7f, 0x68, 0xd0, 0x0e, 0x76, 0x02, 0x89, 0x31, 0x3a, 0x81, 0x90, 0x90, 0x4f, 0x8e, + 0x55, 0x1d, 0x8e, 0x7d, 0xc9, 0x09, 0x05, 0x52, 0x14, 0x9c, 0xa1, 0x48, 0x5f, 0xff, 0x37, 0x07, + 0x99, 0x72, 0x13, 0xab, 0xa6, 0xe5, 0xf4, 0x55, 0x98, 0x61, 0xbe, 0x68, 0x46, 0xcb, 0x21, 0x1f, + 0x3a, 0xdb, 0x16, 0xe7, 0x57, 0x22, 0x3f, 0x83, 0x16, 0x62, 0xe8, 0xc8, 0xf3, 0x35, 0x36, 0x03, + 0x2b, 0xbf, 0x32, 0x30, 0x33, 0x20, 0xfe, 0xf9, 0x6b, 0x43, 0xb8, 0xdc, 0x75, 0xde, 0x82, 0x94, + 0xfd, 0xed, 0x2a, 0x9a, 0x77, 0xbf, 0x9f, 0xf5, 0x7c, 0xda, 0xca, 0x2f, 0xf8, 0xa8, 0xce, 0xbc, + 0xf5, 0xbf, 0x67, 0x00, 0xfa, 0xbd, 0x0e, 0x7a, 0x08, 0xd3, 0xde, 0xcf, 0xe7, 0xd0, 0x52, 0xc4, + 0xc7, 0x9b, 0xfc, 0x72, 0xf0, 0xa0, 0xab, 0xd3, 0x43, 0x98, 0xf6, 0x7e, 0xac, 0xd1, 0x17, 0x16, + 0xf0, 0xc1, 0x48, 0x5f, 0x58, 0xe0, 0xf7, 0x1d, 0x31, 0xd4, 0x86, 0x0b, 0x21, 0xcf, 0xf5, 0xe8, + 0xfa, 0x68, 0x1f, 0x35, 0xf0, 0xaf, 0x8e, 0xf8, 0xee, 0x2f, 0xc4, 0x90, 0x0e, 0x17, 0x43, 0x5f, + 0xa9, 0xd1, 0xda, 0xa8, 0x6f, 0xe8, 0xfc, 0x8d, 0x11, 0x38, 0xdd, 0x35, 0x7b, 0xc0, 0x87, 0x3f, + 0x8d, 0xa1, 0x1b, 0x23, 0xbf, 0xd9, 0xf2, 0x37, 0x47, 0x7f, 0x69, 0x13, 0x62, 0x68, 0x07, 0xa6, + 0x3c, 0xaf, 0x2d, 0x88, 0x0f, 0x7c, 0x82, 0x21, 0x82, 0x97, 0x22, 0x9e, 0x67, 0x88, 0x24, 0xcf, + 0xdb, 0x42, 0x5f, 0xd2, 0xe0, 0x23, 0x49, 0x5f, 0x52, 0xc0, 0x63, 0x84, 0xdf, 0xfc, 0xbe, 0xcb, + 0x2f, 0xc8, 0xfc, 0xc1, 0xb7, 0x67, 0x90, 0xf9, 0x43, 0x6e, 0x52, 0x21, 0x86, 0xde, 0x87, 0x59, + 0x16, 0x26, 0x45, 0x2b, 0x91, 0x70, 0x2f, 0x7f, 0x29, 0x6c, 0xd8, 0x2b, 0x92, 0x45, 0xe5, 0xfa, + 0x22, 0x03, 0xd1, 0xc1, 0xbe, 0xc8, 0x10, 0x30, 0x2f, 0x66, 0xe5, 0x27, 0x06, 0x6b, 0xea, 0xe7, + 0xa7, 0x20, 0x88, 0xac, 0x9f, 0x9f, 0x02, 0x01, 0x2a, 0x21, 0x86, 0x14, 0x58, 0x0c, 0x86, 0x3a, + 0xd0, 0xb5, 0x91, 0x90, 0x1c, 0xfe, 0xfa, 0x30, 0x36, 0x37, 0xd5, 0xfc, 0x2d, 0x05, 0x49, 0xfb, + 0x16, 0xac, 0xc3, 0x39, 0x5f, 0xab, 0x89, 0x2e, 0x45, 0x37, 0xe0, 0xfc, 0xe5, 0xd0, 0x71, 0x77, + 0x27, 0x4f, 0xe1, 0xfc, 0x40, 0xf3, 0x88, 0x56, 0xbd, 0xf3, 0x82, 0x1a, 0x58, 0xfe, 0x4a, 0x04, + 0x87, 0x5f, 0x36, 0x9b, 0x76, 0x56, 0x87, 0x75, 0x37, 0xac, 0xec, 0xb0, 0x54, 0xf3, 0x11, 0x29, + 0x3a, 0xfc, 0x49, 0x46, 0x60, 0xf5, 0x0a, 0x4c, 0x2f, 0x57, 0x23, 0x79, 0xdc, 0x15, 0x3e, 0x74, + 0xab, 0x1d, 0x4f, 0x75, 0x8d, 0x18, 0xe5, 0x02, 0x9b, 0x00, 0x5e, 0x88, 0x62, 0x71, 0xc5, 0x3f, + 0x86, 0xac, 0xff, 0x0a, 0x46, 0xcc, 0x79, 0x05, 0xb9, 0xcd, 0x6a, 0x38, 0x83, 0xdf, 0x32, 0xfe, + 0xf8, 
0xf7, 0x6b, 0x15, 0x14, 0xf9, 0x57, 0x23, 0x79, 0xbc, 0x19, 0xcb, 0x53, 0xf0, 0xf5, 0x33, + 0xd6, 0x60, 0x71, 0xd8, 0xcf, 0x58, 0x01, 0x15, 0xa2, 0x10, 0xbb, 0xf7, 0x0e, 0x40, 0xc3, 0x50, + 0x24, 0xd2, 0x11, 0xa3, 0x95, 0x81, 0xc7, 0x89, 0x07, 0x0a, 0x6e, 0x37, 0x77, 0xbb, 0xa6, 0xa2, + 0xa9, 0x46, 0xee, 0x17, 0x19, 0xbb, 0x1d, 0x9f, 0x6c, 0x18, 0x0a, 0x69, 0x4c, 0x37, 0x53, 0x4f, + 0x13, 0x0d, 0x43, 0x39, 0x4c, 0xdb, 0xfc, 0x6f, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x16, 0x7c, + 0xfe, 0x46, 0x7f, 0x36, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// IdentityClient is the client API for Identity service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/GetPluginCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Identity/Probe", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IdentityServer is the server API for Identity service. +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +// UnimplementedIdentityServer can be embedded to have forward compatible implementations. 
+type UnimplementedIdentityServer struct { +} + +func (*UnimplementedIdentityServer) GetPluginInfo(ctx context.Context, req *GetPluginInfoRequest) (*GetPluginInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginInfo not implemented") +} +func (*UnimplementedIdentityServer) GetPluginCapabilities(ctx context.Context, req *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetPluginCapabilities not implemented") +} +func (*UnimplementedIdentityServer) Probe(ctx context.Context, req *ProbeRequest) (*ProbeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Probe not implemented") +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerPublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ValidateVolumeCapabilities", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListVolumes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/GetCapacity", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) { + out := new(CreateSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/CreateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) { + out := new(DeleteSnapshotResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/DeleteSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerExpandVolume(ctx context.Context, in *ControllerExpandVolumeRequest, opts ...grpc.CallOption) (*ControllerExpandVolumeResponse, error) { + out := new(ControllerExpandVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Controller/ControllerExpandVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. 
+type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + ControllerExpandVolume(context.Context, *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) +} + +// UnimplementedControllerServer can be embedded to have forward compatible implementations. +type UnimplementedControllerServer struct { +} + +func (*UnimplementedControllerServer) CreateVolume(ctx context.Context, req *CreateVolumeRequest) (*CreateVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateVolume not implemented") +} +func (*UnimplementedControllerServer) DeleteVolume(ctx context.Context, req *DeleteVolumeRequest) (*DeleteVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerPublishVolume not implemented") +} +func (*UnimplementedControllerServer) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerUnpublishVolume not implemented") +} +func (*UnimplementedControllerServer) ValidateVolumeCapabilities(ctx context.Context, req *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateVolumeCapabilities not implemented") +} +func (*UnimplementedControllerServer) ListVolumes(ctx context.Context, req *ListVolumesRequest) (*ListVolumesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListVolumes not implemented") +} +func (*UnimplementedControllerServer) GetCapacity(ctx context.Context, req *GetCapacityRequest) (*GetCapacityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCapacity not implemented") +} +func (*UnimplementedControllerServer) ControllerGetCapabilities(ctx context.Context, req *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerGetCapabilities not implemented") +} +func (*UnimplementedControllerServer) CreateSnapshot(ctx context.Context, req *CreateSnapshotRequest) (*CreateSnapshotResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented") +} +func (*UnimplementedControllerServer) DeleteSnapshot(ctx context.Context, req *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented") +} +func (*UnimplementedControllerServer) ListSnapshots(ctx context.Context, req *ListSnapshotsRequest) (*ListSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedControllerServer) ControllerExpandVolume(ctx context.Context, req *ControllerExpandVolumeRequest) (*ControllerExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ControllerExpandVolume not implemented") +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/CreateSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/DeleteSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerExpandVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Controller/ControllerExpandVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerExpandVolume(ctx, req.(*ControllerExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + { + MethodName: "CreateSnapshot", + Handler: _Controller_CreateSnapshot_Handler, + }, + { + MethodName: "DeleteSnapshot", + Handler: _Controller_DeleteSnapshot_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _Controller_ListSnapshots_Handler, + }, + { + MethodName: "ControllerExpandVolume", + Handler: _Controller_ControllerExpandVolume_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} + +// NodeClient is the client API for Node service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeStageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnstageVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodePublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeUnpublishVolume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetVolumeStats(ctx context.Context, in *NodeGetVolumeStatsRequest, opts ...grpc.CallOption) (*NodeGetVolumeStatsResponse, error) { + out := new(NodeGetVolumeStatsResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetVolumeStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeExpandVolume(ctx context.Context, in *NodeExpandVolumeRequest, opts ...grpc.CallOption) (*NodeExpandVolumeResponse, error) { + out := new(NodeExpandVolumeResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeExpandVolume", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetCapabilities", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetInfo(ctx context.Context, in *NodeGetInfoRequest, opts ...grpc.CallOption) (*NodeGetInfoResponse, error) { + out := new(NodeGetInfoResponse) + err := c.cc.Invoke(ctx, "/csi.v1.Node/NodeGetInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServer is the server API for Node service. +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + NodeGetVolumeStats(context.Context, *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) + NodeExpandVolume(context.Context, *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) + NodeGetInfo(context.Context, *NodeGetInfoRequest) (*NodeGetInfoResponse, error) +} + +// UnimplementedNodeServer can be embedded to have forward compatible implementations. +type UnimplementedNodeServer struct { +} + +func (*UnimplementedNodeServer) NodeStageVolume(ctx context.Context, req *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeStageVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnstageVolume(ctx context.Context, req *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnstageVolume not implemented") +} +func (*UnimplementedNodeServer) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodePublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeUnpublishVolume(ctx context.Context, req *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnpublishVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetVolumeStats(ctx context.Context, req *NodeGetVolumeStatsRequest) (*NodeGetVolumeStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetVolumeStats not implemented") +} +func (*UnimplementedNodeServer) NodeExpandVolume(ctx context.Context, req *NodeExpandVolumeRequest) (*NodeExpandVolumeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeExpandVolume not implemented") +} +func (*UnimplementedNodeServer) NodeGetCapabilities(ctx context.Context, req *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeGetCapabilities not implemented") +} +func (*UnimplementedNodeServer) NodeGetInfo(ctx context.Context, req *NodeGetInfoRequest) (*NodeGetInfoResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method NodeGetInfo not implemented") +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetVolumeStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetVolumeStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetVolumeStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetVolumeStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetVolumeStats(ctx, req.(*NodeGetVolumeStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeExpandVolume_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeExpandVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeExpandVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeExpandVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeExpandVolume(ctx, req.(*NodeExpandVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v1.Node/NodeGetInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetInfo(ctx, req.(*NodeGetInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v1.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetVolumeStats", + Handler: _Node_NodeGetVolumeStats_Handler, + }, + { + MethodName: "NodeExpandVolume", + Handler: _Node_NodeExpandVolume_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + { + MethodName: "NodeGetInfo", + Handler: _Node_NodeGetInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/container-storage-interface/spec/csi.proto", +} diff --git a/vendor/vendor.json b/vendor/vendor.json index c45a12138..6e2394693 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -87,6 +87,7 @@ {"path":"github.com/circonus-labs/circonus-gometrics/api/config","checksumSHA1":"bQhz/fcyZPmuHSH2qwC4ZtATy5c=","revision":"d6e3aea90ab9f90fe8456e13fc520f43d102da4d","revisionTime":"2019-01-28T15:50:09Z","version":"=v2","versionExact":"v2"}, {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"Ij8yB33E0Kk+GfTkNRoF1mG26dc=","revision":"d6e3aea90ab9f90fe8456e13fc520f43d102da4d","revisionTime":"2019-01-28T15:50:09Z","version":"=v2","versionExact":"v2"}, 
{"path":"github.com/circonus-labs/circonusllhist","checksumSHA1":"VbfeVqeOM+dTNxCmpvmYS0LwQn0=","revision":"7d649b46cdc2cd2ed102d350688a75a4fd7778c6","revisionTime":"2016-11-21T13:51:53Z"}, + {"path":"github.com/container-storage-interface/spec/lib/go/csi","checksumSHA1":"UG2eSIhT6aFn6zWuz48IhlO+eEE=","revision":"a33ece0a8a9f9449688bad8c3ddb103ecf58749b","revisionTime":"2019-10-21T21:08:49Z","tree":true}, {"path":"github.com/containerd/console","checksumSHA1":"Lc9okmPYuvnmj2yWbW/ioFh2LJE=","revision":"8375c3424e4d7b114e8a90a4a40c8e1b40d1d4e6","revisionTime":"2019-12-19T16:52:38Z"}, {"path":"github.com/containerd/containerd/errdefs","checksumSHA1":"ru5eKWdLzXfpNRL+Mi1bxbmY8DU=","revision":"14fbcd886f6e971d86f6e3bed43298491d89f393","revisionTime":"2020-03-14T00:01:32Z"}, {"path":"github.com/containerd/continuity/pathdriver","checksumSHA1":"GqIrOttKaO7k6HIaHQLPr3cY7rY=","origin":"github.com/docker/docker/vendor/github.com/containerd/continuity/pathdriver","revision":"320063a2ad06a1d8ada61c94c29dbe44e2d87473","revisionTime":"2018-08-16T08:14:46Z"}, From e2450bb25174e5c4e21ca42663356cd3ec50d633 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Sun, 12 Jan 2020 14:47:07 +0100 Subject: [PATCH 002/126] vendor: go-hclog 0.10.1 -> 0.11.0 --- .../hashicorp/go-hclog/interceptlogger.go | 13 +++++++++++ .../hashicorp/go-hclog/intlogger.go | 23 +++++++++++-------- .../github.com/hashicorp/go-hclog/logger.go | 3 +++ .../hashicorp/go-hclog/nulllogger.go | 2 ++ vendor/vendor.json | 2 +- 5 files changed, 33 insertions(+), 10 deletions(-) diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go index 7b76b4b89..7e86dc878 100644 --- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go @@ -30,6 +30,19 @@ func NewInterceptLogger(opts *LoggerOptions) InterceptLogger { return intercept } +func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) { + i.Logger.Log(level, msg, args...) + if atomic.LoadInt32(i.sinkCount) == 0 { + return + } + + i.mu.Lock() + defer i.mu.Unlock() + for s := range i.Sinks { + s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...) + } +} + // Emit the message and args at TRACE level to log and sinks func (i *interceptLogger) Trace(msg string, args ...interface{}) { i.Logger.Trace(msg, args...) diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go index 65129ff74..0786c924b 100644 --- a/vendor/github.com/hashicorp/go-hclog/intlogger.go +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -120,7 +120,7 @@ func newLogger(opts *LoggerOptions) *intLogger { // Log a message and a set of key/value pairs if the given level is at // or more severe that the threshold configured in the Logger. -func (l *intLogger) Log(name string, level Level, msg string, args ...interface{}) { +func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) { if level < Level(atomic.LoadInt32(l.level)) { return } @@ -133,7 +133,7 @@ func (l *intLogger) Log(name string, level Level, msg string, args ...interface{ if l.json { l.logJSON(t, name, level, msg, args...) } else { - l.log(t, name, level, msg, args...) + l.logPlain(t, name, level, msg, args...) 
} l.writer.Flush(level) @@ -171,7 +171,7 @@ func trimCallerPath(path string) string { var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`) // Non-JSON logging format function -func (l *intLogger) log(t time.Time, name string, level Level, msg string, args ...interface{}) { +func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) { l.writer.WriteString(t.Format(l.timeFormat)) l.writer.WriteByte(' ') @@ -431,29 +431,34 @@ func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg strin return vals } +// Emit the message and args at the provided level +func (l *intLogger) Log(level Level, msg string, args ...interface{}) { + l.log(l.Name(), level, msg, args...) +} + // Emit the message and args at DEBUG level func (l *intLogger) Debug(msg string, args ...interface{}) { - l.Log(l.Name(), Debug, msg, args...) + l.log(l.Name(), Debug, msg, args...) } // Emit the message and args at TRACE level func (l *intLogger) Trace(msg string, args ...interface{}) { - l.Log(l.Name(), Trace, msg, args...) + l.log(l.Name(), Trace, msg, args...) } // Emit the message and args at INFO level func (l *intLogger) Info(msg string, args ...interface{}) { - l.Log(l.Name(), Info, msg, args...) + l.log(l.Name(), Info, msg, args...) } // Emit the message and args at WARN level func (l *intLogger) Warn(msg string, args ...interface{}) { - l.Log(l.Name(), Warn, msg, args...) + l.log(l.Name(), Warn, msg, args...) } // Emit the message and args at ERROR level func (l *intLogger) Error(msg string, args ...interface{}) { - l.Log(l.Name(), Error, msg, args...) + l.log(l.Name(), Error, msg, args...) } // Indicate that the logger would emit TRACE level logs @@ -593,7 +598,7 @@ func (l *intLogger) checkWriterIsFile() *os.File { // Accept implements the SinkAdapter interface func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) { - i.Log(name, level, msg, args...) + i.log(name, level, msg, args...) 
} // ImpliedArgs returns the loggers implied args diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go index 48d608714..95b03184f 100644 --- a/vendor/github.com/hashicorp/go-hclog/logger.go +++ b/vendor/github.com/hashicorp/go-hclog/logger.go @@ -95,6 +95,9 @@ type Logger interface { // Args are alternating key, val pairs // keys must be strings // vals can be any type, but display is implementation specific + // Emit a message and key/value pairs at a provided log level + Log(level Level, msg string, args ...interface{}) + // Emit a message and key/value pairs at the TRACE level Trace(msg string, args ...interface{}) diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go index 4abdd5583..bc14f7708 100644 --- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go +++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go @@ -15,6 +15,8 @@ func NewNullLogger() Logger { type nullLogger struct{} +func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {} + func (l *nullLogger) Trace(msg string, args ...interface{}) {} func (l *nullLogger) Debug(msg string, args ...interface{}) {} diff --git a/vendor/vendor.json b/vendor/vendor.json index 6e2394693..5148440e9 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -240,7 +240,7 @@ {"path":"github.com/hashicorp/go-envparse","checksumSHA1":"FKmqR4DC3nCXtnT9pe02z5CLNWo=","revision":"310ca1881b22af3522e3a8638c0b426629886196","revisionTime":"2018-01-19T21:58:41Z"}, {"path":"github.com/hashicorp/go-getter","checksumSHA1":"d4brua17AGQqMNtngK4xKOUwboY=","revision":"f5101da0117392c6e7960c934f05a2fd689a5b5f","revisionTime":"2019-08-22T19:45:07Z"}, {"path":"github.com/hashicorp/go-getter/helper/url","checksumSHA1":"9J+kDr29yDrwsdu2ULzewmqGjpA=","revision":"b345bfcec894fb7ff3fdf9b21baf2f56ea423d98","revisionTime":"2018-04-10T17:49:45Z"}, - {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"p0y3e3+Oj9GJXM/OW3ISDXap5+w=","revision":"e8a977f5d6b14a15e6672edf8b1d6cd545388c7a","revisionTime":"2019-12-18T17:30:18Z","version":"v0.10.1","versionExact":"v0.10.1"}, + {"path":"github.com/hashicorp/go-hclog","checksumSHA1":"tNgHh706sto5/99XYD5jIuBDqa8=","revision":"0e86804c9e4bede0738cbbc370e705ef82580e7e","revisionTime":"2020-01-11T00:06:39Z","version":"v0.11.0","versionExact":"v0.11.0"}, {"path":"github.com/hashicorp/go-immutable-radix","checksumSHA1":"Cas2nprG6pWzf05A2F/OlnjUu2Y=","revision":"8aac2701530899b64bdea735a1de8da899815220","revisionTime":"2017-07-25T22:12:15Z"}, {"path":"github.com/hashicorp/go-memdb","checksumSHA1":"FMAvwDar2bQyYAW4XMFhAt0J5xA=","revision":"20ff6434c1cc49b80963d45bf5c6aa89c78d8d57","revisionTime":"2017-08-31T20:15:40Z"}, {"path":"github.com/hashicorp/go-msgpack/codec","checksumSHA1":"CKGYNUDKre3Z2g4hHNVfp5nTcfA=","revision":"23165f7bc3c2dda1891434ebb9da1511a7bafc1c","revisionTime":"2019-09-27T12:33:13Z","version":"upstream-08f7b40","versionExact":"upstream-08f7b40"}, From 55ed831c563e1563a2d926f1dacf8652d3660b0c Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Fri, 13 Mar 2020 12:04:48 -0400 Subject: [PATCH 003/126] vendor: Add github.com/grpc-ecosystem/go-grpc-middleware/retry --- .../grpc-ecosystem/go-grpc-middleware/LICENSE | 201 +++++++++++ .../go-grpc-middleware/retry/backoff.go | 44 +++ .../go-grpc-middleware/retry/doc.go | 25 ++ .../go-grpc-middleware/retry/options.go | 142 ++++++++ .../go-grpc-middleware/retry/retry.go | 323 ++++++++++++++++++ .../util/backoffutils/backoff.go | 28 ++ 
.../go-grpc-middleware/util/metautils/doc.go | 19 ++ .../util/metautils/nicemd.go | 126 +++++++ .../util/metautils/single_key.go | 22 ++ vendor/vendor.json | 3 + 10 files changed, 933 insertions(+) create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go create mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE new file mode 100644 index 000000000..b2b065037 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go new file mode 100644 index 000000000..ad35f09a8 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/backoff.go @@ -0,0 +1,44 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils" +) + +// BackoffLinear is very simple: it waits for a fixed period of time between calls. +func BackoffLinear(waitBetween time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return waitBetween + } +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. 
+func BackoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(waitBetween, jitterFraction) + } +} + +// BackoffExponential produces increasing intervals for each attempt. +// +// The scalar is multiplied times 2 raised to the current attempt. So the first +// retry with a scalar of 100ms is 100ms, while the 5th attempt would be 1.6s. +func BackoffExponential(scalar time.Duration) BackoffFunc { + return func(attempt uint) time.Duration { + return scalar * time.Duration(backoffutils.ExponentBase2(attempt)) + } +} + +// BackoffExponentialWithJitter creates an exponential backoff like +// BackoffExponential does, but adds jitter. +func BackoffExponentialWithJitter(scalar time.Duration, jitterFraction float64) BackoffFunc { + return func(attempt uint) time.Duration { + return backoffutils.JitterUp(scalar*time.Duration(backoffutils.ExponentBase2(attempt)), jitterFraction) + } +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go new file mode 100644 index 000000000..afd924a14 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go @@ -0,0 +1,25 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +`grpc_retry` provides client-side request retry logic for gRPC. + +Client-Side Request Retry Interceptor + +It allows for automatic retry, inside the generated gRPC code of requests based on the gRPC status +of the reply. It supports unary (1:1), and server stream (1:n) requests. + +By default the interceptors *are disabled*, preventing accidental use of retries. You can easily +override the number of retries (setting them to more than 0) with a `grpc.ClientOption`, e.g.: + + myclient.Ping(ctx, goodPing, grpc_retry.WithMax(5)) + +Other default options are: retry on `ResourceExhausted` and `Unavailable` gRPC codes, use a 50ms +linear backoff with 10% jitter. + +For chained interceptors, the retry interceptor will call every interceptor that follows it +whenever when a retry happens. + +Please see examples for more advanced use. +*/ +package grpc_retry diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go new file mode 100644 index 000000000..7a633e293 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/options.go @@ -0,0 +1,142 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // DefaultRetriableCodes is a set of well known types gRPC codes that should be retri-able. + // + // `ResourceExhausted` means that the user quota, e.g. per-RPC limits, have been reached. + // `Unavailable` means that system is currently unavailable and the client should retry again. 
+ DefaultRetriableCodes = []codes.Code{codes.ResourceExhausted, codes.Unavailable} + + defaultOptions = &options{ + max: 0, // disabled + perCallTimeout: 0, // disabled + includeHeader: true, + codes: DefaultRetriableCodes, + backoffFunc: BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return BackoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10)(attempt) + }), + } +) + +// BackoffFunc denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. +type BackoffFunc func(attempt uint) time.Duration + +// BackoffFuncContext denotes a family of functions that control the backoff duration between call retries. +// +// They are called with an identifier of the attempt, and should return a time the system client should +// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request +// the deadline of the request takes precedence and the wait will be interrupted before proceeding +// with the next iteration. The context can be used to extract request scoped metadata and context values. +type BackoffFuncContext func(ctx context.Context, attempt uint) time.Duration + +// Disable disables the retry behaviour on this call, or this interceptor. +// +// Its semantically the same to `WithMax` +func Disable() CallOption { + return WithMax(0) +} + +// WithMax sets the maximum number of retries on this call, or this interceptor. +func WithMax(maxRetries uint) CallOption { + return CallOption{applyFunc: func(o *options) { + o.max = maxRetries + }} +} + +// WithBackoff sets the `BackoffFunc` used to control time between retries. +func WithBackoff(bf BackoffFunc) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = BackoffFuncContext(func(ctx context.Context, attempt uint) time.Duration { + return bf(attempt) + }) + }} +} + +// WithBackoffContext sets the `BackoffFuncContext` used to control time between retries. +func WithBackoffContext(bf BackoffFuncContext) CallOption { + return CallOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +// WithCodes sets which codes should be retried. +// +// Please *use with care*, as you may be retrying non-idempotent calls. +// +// You cannot automatically retry on Cancelled and Deadline, please use `WithPerRetryTimeout` for these. +func WithCodes(retryCodes ...codes.Code) CallOption { + return CallOption{applyFunc: func(o *options) { + o.codes = retryCodes + }} +} + +// WithPerRetryTimeout sets the RPC timeout per call (including initial call) on this call, or this interceptor. +// +// The context.Deadline of the call takes precedence and sets the maximum time the whole invocation +// will take, but WithPerRetryTimeout can be used to limit the RPC time per each call. +// +// For example, with context.Deadline = now + 10s, and WithPerRetryTimeout(3 * time.Seconds), each +// of the retry calls (including the initial one) will have a deadline of now + 3s. +// +// A value of 0 disables the timeout overrides completely and returns to each retry call using the +// parent `context.Deadline`. +// +// Note that when this is enabled, any DeadlineExceeded errors that are propagated up will be retried. 
+func WithPerRetryTimeout(timeout time.Duration) CallOption { + return CallOption{applyFunc: func(o *options) { + o.perCallTimeout = timeout + }} +} + +type options struct { + max uint + perCallTimeout time.Duration + includeHeader bool + codes []codes.Code + backoffFunc BackoffFuncContext +} + +// CallOption is a grpc.CallOption that is local to grpc_retry. +type CallOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, callOptions []CallOption) *options { + if len(callOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range callOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []CallOption) { + for _, opt := range callOptions { + if co, ok := opt.(CallOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go new file mode 100644 index 000000000..6793f17e6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go @@ -0,0 +1,323 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_retry + +import ( + "context" + "fmt" + "io" + "sync" + "time" + + "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils" + "golang.org/x/net/trace" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +const ( + AttemptMetadataKey = "x-retry-attempty" +) + +// UnaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func UnaryClientInterceptor(optFuncs ...CallOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return invoker(parentCtx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return err + } + callCtx := perCallContext(parentCtx, callOpts, attempt) + lastErr = invoker(callCtx, method, req, reply, cc, grpcOpts...) + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + if lastErr == nil { + return nil + } + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. 
+ return lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// StreamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func StreamClientInterceptor(optFuncs ...CallOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(parentCtx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(parentCtx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "grpc_retry: cannot retry on ClientStreams, set grpc_retry.Disable()") + } + + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, parentCtx, callOpts); err != nil { + return nil, err + } + callCtx := perCallContext(parentCtx, callOpts, 0) + + var newStreamer grpc.ClientStream + newStreamer, lastErr = streamer(callCtx, desc, cc, method, grpcOpts...) + if lastErr == nil { + retryingStreamer := &serverStreamingRetryingStream{ + ClientStream: newStreamer, + callOpts: callOpts, + parentCtx: parentCtx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } + + logTrace(parentCtx, "grpc_retry attempt: %d, got err: %v", attempt, lastErr) + if isContextError(lastErr) { + if parentCtx.Err() != nil { + logTrace(parentCtx, "grpc_retry attempt: %d, parent context error: %v", attempt, parentCtx.Err()) + // its the parent context deadline or cancellation. + return nil, lastErr + } else if callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. + logTrace(parentCtx, "grpc_retry attempt: %d, context error from retry call", attempt) + continue + } + } + if !isRetriable(lastErr, callOpts) { + return nil, lastErr + } + } + return nil, lastErr + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. 
+type serverStreamingRetryingStream struct { + grpc.ClientStream + bufferedSends []interface{} // single message that the client can sen + receivedGood bool // indicates whether any prior receives were successful + wasClosedSend bool // indicates that CloseSend was closed + parentCtx context.Context + callOpts *options + streamerCall func(ctx context.Context) (grpc.ClientStream, error) + mu sync.RWMutex +} + +func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) { + s.mu.Lock() + s.ClientStream = clientStream + s.mu.Unlock() +} + +func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream { + s.mu.RLock() + defer s.mu.RUnlock() + return s.ClientStream +} + +func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error { + s.mu.Lock() + s.bufferedSends = append(s.bufferedSends, m) + s.mu.Unlock() + return s.getStream().SendMsg(m) +} + +func (s *serverStreamingRetryingStream) CloseSend() error { + s.mu.Lock() + s.wasClosedSend = true + s.mu.Unlock() + return s.getStream().CloseSend() +} + +func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) { + return s.getStream().Header() +} + +func (s *serverStreamingRetryingStream) Trailer() metadata.MD { + return s.getStream().Trailer() +} + +func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error { + attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m) + if !attemptRetry { + return lastErr // success or hard failure + } + // We start off from attempt 1, because zeroth was already made on normal SendMsg(). + for attempt := uint(1); attempt < s.callOpts.max; attempt++ { + if err := waitRetryBackoff(attempt, s.parentCtx, s.callOpts); err != nil { + return err + } + callCtx := perCallContext(s.parentCtx, s.callOpts, attempt) + newStream, err := s.reestablishStreamAndResendBuffer(callCtx) + if err != nil { + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + return err + } + s.setStream(newStream) + attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m) + //fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr) + if !attemptRetry { + return lastErr + } + } + return lastErr +} + +func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) { + s.mu.RLock() + wasGood := s.receivedGood + s.mu.RUnlock() + err := s.getStream().RecvMsg(m) + if err == nil || err == io.EOF { + s.mu.Lock() + s.receivedGood = true + s.mu.Unlock() + return false, err + } else if wasGood { + // previous RecvMsg in the stream succeeded, no retry logic should interfere + return false, err + } + if isContextError(err) { + if s.parentCtx.Err() != nil { + logTrace(s.parentCtx, "grpc_retry parent context error: %v", s.parentCtx.Err()) + return false, err + } else if s.callOpts.perCallTimeout != 0 { + // We have set a perCallTimeout in the retry middleware, which would result in a context error if + // the deadline was exceeded, in which case try again. 
+ logTrace(s.parentCtx, "grpc_retry context error from retry call") + return true, err + } + } + return isRetriable(err, s.callOpts), err +} + +func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) { + s.mu.RLock() + bufferedSends := s.bufferedSends + s.mu.RUnlock() + newStream, err := s.streamerCall(callCtx) + if err != nil { + logTrace(callCtx, "grpc_retry failed redialing new stream: %v", err) + return nil, err + } + for _, msg := range bufferedSends { + if err := newStream.SendMsg(msg); err != nil { + logTrace(callCtx, "grpc_retry failed resending message: %v", err) + return nil, err + } + } + if err := newStream.CloseSend(); err != nil { + logTrace(callCtx, "grpc_retry failed CloseSend on new stream %v", err) + return nil, err + } + return newStream, nil +} + +func waitRetryBackoff(attempt uint, parentCtx context.Context, callOpts *options) error { + var waitTime time.Duration = 0 + if attempt > 0 { + waitTime = callOpts.backoffFunc(parentCtx, attempt) + } + if waitTime > 0 { + logTrace(parentCtx, "grpc_retry attempt: %d, backoff for %v", attempt, waitTime) + timer := time.NewTimer(waitTime) + select { + case <-parentCtx.Done(): + timer.Stop() + return contextErrToGrpcErr(parentCtx.Err()) + case <-timer.C: + } + } + return nil +} + +func isRetriable(err error, callOpts *options) bool { + errCode := status.Code(err) + if isContextError(err) { + // context errors are not retriable based on user settings. + return false + } + for _, code := range callOpts.codes { + if code == errCode { + return true + } + } + return false +} + +func isContextError(err error) bool { + code := status.Code(err) + return code == codes.DeadlineExceeded || code == codes.Canceled +} + +func perCallContext(parentCtx context.Context, callOpts *options, attempt uint) context.Context { + ctx := parentCtx + if callOpts.perCallTimeout != 0 { + ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout) + } + if attempt > 0 && callOpts.includeHeader { + mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt)) + ctx = mdClone.ToOutgoing(ctx) + } + return ctx +} + +func contextErrToGrpcErr(err error) error { + switch err { + case context.DeadlineExceeded: + return status.Errorf(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Errorf(codes.Canceled, err.Error()) + default: + return status.Errorf(codes.Unknown, err.Error()) + } +} + +func logTrace(ctx context.Context, format string, a ...interface{}) { + tr, ok := trace.FromContext(ctx) + if !ok { + return + } + tr.LazyPrintf(format, a...) +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go new file mode 100644 index 000000000..4e69a6305 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils/backoff.go @@ -0,0 +1,28 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Backoff Helper Utilities + +Implements common backoff features. +*/ +package backoffutils + +import ( + "math/rand" + "time" +) + +// JitterUp adds random jitter to the duration. +// +// This adds or subtracts time from the duration within a given jitter fraction. 
+// For example for 10s and jitter 0.1, it will return a time within [9s, 11s]) +func JitterUp(duration time.Duration, jitter float64) time.Duration { + multiplier := jitter * (rand.Float64()*2 - 1) + return time.Duration(float64(duration) * (1 + multiplier)) +} + +// ExponentBase2 computes 2^(a-1) where a >= 1. If a is 0, the result is 0. +func ExponentBase2(a uint) uint { + return (1 << a) >> 1 +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go new file mode 100644 index 000000000..1ed9bb499 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/doc.go @@ -0,0 +1,19 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +/* +Package `metautils` provides convenience functions for dealing with gRPC metadata.MD objects inside +Context handlers. + +While the upstream grpc-go package contains decent functionality (see https://github.com/grpc/grpc-go/blob/master/Documentation/grpc-metadata.md) +they are hard to use. + +The majority of functions center around the NiceMD, which is a convenience wrapper around metadata.MD. For example +the following code allows you to easily extract incoming metadata (server handler) and put it into a new client context +metadata. + + nmd := metautils.ExtractIncoming(serverCtx).Clone(":authorization", ":custom") + clientCtx := nmd.Set("x-client-header", "2").Set("x-another", "3").ToOutgoing(ctx) +*/ + +package metautils diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go new file mode 100644 index 000000000..9f0456747 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go @@ -0,0 +1,126 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package metautils + +import ( + "context" + "strings" + + "google.golang.org/grpc/metadata" +) + +// NiceMD is a convenience wrapper definiting extra functions on the metadata. +type NiceMD metadata.MD + +// ExtractIncoming extracts an inbound metadata from the server-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractIncoming(ctx context.Context) NiceMD { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// ExtractOutgoing extracts an outbound metadata from the client-side context. +// +// This function always returns a NiceMD wrapper of the metadata.MD, in case the context doesn't have metadata it returns +// a new empty NiceMD. +func ExtractOutgoing(ctx context.Context) NiceMD { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + return NiceMD(metadata.Pairs()) + } + return NiceMD(md) +} + +// Clone performs a *deep* copy of the metadata.MD. +// +// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted +// all keys get copied. 
+func (m NiceMD) Clone(copiedKeys ...string) NiceMD { + newMd := NiceMD(metadata.Pairs()) + for k, vv := range m { + found := false + if len(copiedKeys) == 0 { + found = true + } else { + for _, allowedKey := range copiedKeys { + if strings.EqualFold(allowedKey, k) { + found = true + break + } + } + } + if !found { + continue + } + newMd[k] = make([]string, len(vv)) + copy(newMd[k], vv) + } + return NiceMD(newMd) +} + +// ToOutgoing sets the given NiceMD as a client-side context for dispatching. +func (m NiceMD) ToOutgoing(ctx context.Context) context.Context { + return metadata.NewOutgoingContext(ctx, metadata.MD(m)) +} + +// ToIncoming sets the given NiceMD as a server-side context for dispatching. +// +// This is mostly useful in ServerInterceptors.. +func (m NiceMD) ToIncoming(ctx context.Context) context.Context { + return metadata.NewIncomingContext(ctx, metadata.MD(m)) +} + +// Get retrieves a single value from the metadata. +// +// It works analogously to http.Header.Get, returning the first value if there are many set. If the value is not set, +// an empty string is returned. +// +// The function is binary-key safe. +func (m NiceMD) Get(key string) string { + k, _ := encodeKeyValue(key, "") + vv, ok := m[k] + if !ok { + return "" + } + return vv[0] +} + +// Del retrieves a single value from the metadata. +// +// It works analogously to http.Header.Del, deleting all values if they exist. +// +// The function is binary-key safe. + +func (m NiceMD) Del(key string) NiceMD { + k, _ := encodeKeyValue(key, "") + delete(m, k) + return m +} + +// Set sets the given value in a metadata. +// +// It works analogously to http.Header.Set, overwriting all previous metadata values. +// +// The function is binary-key safe. +func (m NiceMD) Set(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = []string{v} + return m +} + +// Add retrieves a single value from the metadata. +// +// It works analogously to http.Header.Add, as it appends to any existing values associated with key. +// +// The function is binary-key safe. +func (m NiceMD) Add(key string, value string) NiceMD { + k, v := encodeKeyValue(key, value) + m[k] = append(m[k], v) + return m +} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go new file mode 100644 index 000000000..8a5387166 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/single_key.go @@ -0,0 +1,22 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package metautils + +import ( + "encoding/base64" + "strings" +) + +const ( + binHdrSuffix = "-bin" +) + +func encodeKeyValue(k, v string) (string, string) { + k = strings.ToLower(k) + if strings.HasSuffix(k, binHdrSuffix) { + val := base64.StdEncoding.EncodeToString([]byte(v)) + v = string(val) + } + return k, v +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 5148440e9..079779ddb 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -197,6 +197,9 @@ {"path":"github.com/gorilla/context","checksumSHA1":"g/V4qrXjUGG9B+e3hB+4NAYJ5Gs=","revision":"08b5f424b9271eedf6f9f0ce86cb9396ed337a42","revisionTime":"2016-08-17T18:46:32Z"}, {"path":"github.com/gorilla/mux","checksumSHA1":"STQSdSj2FcpCf0NLfdsKhNutQT0=","revision":"e48e440e4c92e3251d812f8ce7858944dfa3331c","revisionTime":"2018-08-07T07:52:56Z"}, {"path":"github.com/gorilla/websocket","checksumSHA1":"gr0edNJuVv4+olNNZl5ZmwLgscA=","revision":"0ec3d1bd7fe50c503d6df98ee649d81f4857c564","revisionTime":"2019-03-06T00:42:57Z"}, + {"path":"github.com/grpc-ecosystem/go-grpc-middleware/retry","checksumSHA1":"Wmzc+OYGzhkkXvwphrh/1C7TGmI=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, + {"path":"github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils","checksumSHA1":"BnZAJHYhcmPNlto3WkzkWnRPlXs=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, + {"path":"github.com/grpc-ecosystem/go-grpc-middleware/util/metautils","checksumSHA1":"xvSmjSRfCFmWbEs50bSDXyzRWCo=","revision":"3ce3d519df39b5289d789b3d54f00c7a19929fe4","revisionTime":"2020-02-28T13:55:17Z"}, {"path":"github.com/hashicorp/consul-template","checksumSHA1":"R4eLvAFtqPg22sjAUysBhFfdUPs=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, {"path":"github.com/hashicorp/consul-template/child","checksumSHA1":"yQfiSUOpV5BvGeztDd4fcA7qsbw=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, {"path":"github.com/hashicorp/consul-template/config","checksumSHA1":"ldDPZxD2PEPY4F9MFSOG4D8FWo8=","revision":"58aa6c608af3387d0c2bf5d028be4960be1dbe56","revisionTime":"2020-01-25T00:24:05Z","version":"v0.24.1","versionExact":"v0.24.1"}, From 426c26d7c013b88b798e97036ef93868f7fd9ff4 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 22 Oct 2019 15:20:26 +0200 Subject: [PATCH 004/126] CSI Plugin Registration (#6555) This changeset implements the initial registration and fingerprinting of CSI Plugins as part of #5378. At a high level, it introduces the following: * A `csi_plugin` stanza as part of a Nomad task configuration, to allow a task to expose that it is a plugin. * A new task runner hook: `csi_plugin_supervisor`. This hook does two things. When the `csi_plugin` stanza is detected, it will automatically configure the plugin task to receive bidirectional mounts to the CSI intermediary directory. At runtime, it will then perform an initial heartbeat of the plugin and handle submitting it to the new `dynamicplugins.Registry` for further use by the client, and then run a lightweight heartbeat loop that will emit task events when health changes. * The `dynamicplugins.Registry` for handling plugins that run as Nomad tasks, in contrast to the existing catalog that requires `go-plugin` type plugins and to know the plugin configuration in advance. 
* The `csimanager` which fingerprints CSI plugins, in a similar way to `drivermanager` and `devicemanager`. It currently only fingerprints the NodeID from the plugin, and assumes that all plugins are monolithic. Missing features * We do not use the live updates of the `dynamicplugin` registry in the `csimanager` yet. * We do not deregister the plugins from the client when they shutdown yet, they just become indefinitely marked as unhealthy. This is deliberate until we figure out how we should manage deploying new versions of plugins/transitioning them. --- api/tasks.go | 49 +++ client/allocrunner/alloc_runner.go | 7 + client/allocrunner/config.go | 5 + .../taskrunner/plugin_supervisor_hook.go | 333 +++++++++++++++++ client/allocrunner/taskrunner/task_runner.go | 8 + .../taskrunner/task_runner_hooks.go | 6 + client/client.go | 37 ++ client/dynamicplugins/registry.go | 338 ++++++++++++++++++ client/dynamicplugins/registry_test.go | 171 +++++++++ client/node_updater.go | 191 +++++++++- client/pluginmanager/csimanager/instance.go | 203 +++++++++++ .../pluginmanager/csimanager/instance_test.go | 159 ++++++++ client/pluginmanager/csimanager/manager.go | 153 ++++++++ .../pluginmanager/csimanager/manager_test.go | 111 ++++++ command/agent/job_endpoint.go | 13 + jobspec/parse_task.go | 21 ++ jobspec/parse_test.go | 24 ++ jobspec/test-fixtures/csi-plugin.hcl | 13 + nomad/structs/csi.go | 68 ++++ nomad/structs/node.go | 166 +++++++++ nomad/structs/structs.go | 45 +++ nomad/structs/structs_test.go | 49 +++ plugins/csi/client.go | 210 +++++++++++ plugins/csi/client_test.go | 191 ++++++++++ plugins/csi/fake/client.go | 112 ++++++ plugins/csi/plugin.go | 85 +++++ plugins/csi/testing/client.go | 43 +++ 27 files changed, 2805 insertions(+), 6 deletions(-) create mode 100644 client/allocrunner/taskrunner/plugin_supervisor_hook.go create mode 100644 client/dynamicplugins/registry.go create mode 100644 client/dynamicplugins/registry_test.go create mode 100644 client/pluginmanager/csimanager/instance.go create mode 100644 client/pluginmanager/csimanager/instance_test.go create mode 100644 client/pluginmanager/csimanager/manager.go create mode 100644 client/pluginmanager/csimanager/manager_test.go create mode 100644 jobspec/test-fixtures/csi-plugin.hcl create mode 100644 nomad/structs/csi.go create mode 100644 plugins/csi/client.go create mode 100644 plugins/csi/client_test.go create mode 100644 plugins/csi/fake/client.go create mode 100644 plugins/csi/plugin.go create mode 100644 plugins/csi/testing/client.go diff --git a/api/tasks.go b/api/tasks.go index 4e05a2cd3..7e68fea25 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -643,6 +643,7 @@ type Task struct { Templates []*Template DispatchPayload *DispatchPayloadConfig VolumeMounts []*VolumeMount + CSIPluginConfig *TaskCSIPluginConfig `mapstructure:"csi_plugin" json:"csi_plugin,omitempty"` Leader bool ShutdownDelay time.Duration `mapstructure:"shutdown_delay"` KillSignal string `mapstructure:"kill_signal"` @@ -683,6 +684,9 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { if t.Lifecycle.Empty() { t.Lifecycle = nil } + if t.CSIPluginConfig != nil { + t.CSIPluginConfig.Canonicalize() + } } // TaskArtifact is used to download artifacts before running a task. @@ -909,3 +913,48 @@ type TaskEvent struct { TaskSignal string GenericSource string } + +// CSIPluginType is an enum string that encapsulates the valid options for a +// CSIPlugin stanza's Type. These modes will allow the plugin to be used in +// different ways by the client. 
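+//
+// As a rough illustration only (the task name and plugin ID below are
+// hypothetical; the field names follow the mapstructure tags on
+// TaskCSIPluginConfig), a task might declare itself as a plugin with a
+// stanza along these lines:
+//
+//    task "csi-hostpath-plugin" {
+//      csi_plugin {
+//        id        = "csi-hostpath"
+//        type      = "monolith"
+//        mount_dir = "/csi"
+//      }
+//    }
+//
+// When mount_dir is omitted it is canonicalized to "/csi" (see
+// TaskCSIPluginConfig.Canonicalize below).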
+type CSIPluginType string + +const ( + // CSIPluginTypeNode indicates that Nomad should only use the plugin for + // performing Node RPCs against the provided plugin. + CSIPluginTypeNode CSIPluginType = "node" + + // CSIPluginTypeController indicates that Nomad should only use the plugin for + // performing Controller RPCs against the provided plugin. + CSIPluginTypeController CSIPluginType = "controller" + + // CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for + // both controller and node rpcs. + CSIPluginTypeMonolith CSIPluginType = "monolith" +) + +// TaskCSIPluginConfig contains the data that is required to setup a task as a +// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure +// mounts for the plugin and initiate the connection to the plugin catalog. +type TaskCSIPluginConfig struct { + // ID is the identifier of the plugin. + // Ideally this should be the FQDN of the plugin. + ID string `mapstructure:"id"` + + // CSIPluginType instructs Nomad on how to handle processing a plugin + Type CSIPluginType `mapstructure:"type"` + + // MountDir is the destination that nomad should mount in its CSI + // directory for the plugin. It will then expect a file called CSISocketName + // to be created by the plugin, and will provide references into + // "MountDir/CSIIntermediaryDirname/VolumeName/AllocID for mounts. + // + // Default is /csi. + MountDir string `mapstructure:"mount_dir"` +} + +func (t *TaskCSIPluginConfig) Canonicalize() { + if t.MountDir == "" { + t.MountDir = "/csi" + } +} diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index fdd62ad98..9c8286c2d 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" @@ -134,6 +135,10 @@ type allocRunner struct { // prevAllocMigrator allows the migration of a previous allocations alloc dir. prevAllocMigrator allocwatcher.PrevAllocMigrator + // dynamicRegistry contains all locally registered dynamic plugins (e.g csi + // plugins). 
+ dynamicRegistry dynamicplugins.Registry + // devicemanager is used to mount devices as well as lookup device // statistics devicemanager devicemanager.Manager @@ -178,6 +183,7 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { deviceStatsReporter: config.DeviceStatsReporter, prevAllocWatcher: config.PrevAllocWatcher, prevAllocMigrator: config.PrevAllocMigrator, + dynamicRegistry: config.DynamicRegistry, devicemanager: config.DeviceManager, driverManager: config.DriverManager, serversContactedCh: config.ServersContactedCh, @@ -218,6 +224,7 @@ func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error { Logger: ar.logger, StateDB: ar.stateDB, StateUpdater: ar, + DynamicRegistry: ar.dynamicRegistry, Consul: ar.consulClient, ConsulSI: ar.sidsClient, Vault: ar.vaultClient, diff --git a/client/allocrunner/config.go b/client/allocrunner/config.go index a9240b3a3..4893c9604 100644 --- a/client/allocrunner/config.go +++ b/client/allocrunner/config.go @@ -6,6 +6,7 @@ import ( clientconfig "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" @@ -48,6 +49,10 @@ type Config struct { // PrevAllocMigrator allows the migration of a previous allocations alloc dir PrevAllocMigrator allocwatcher.PrevAllocMigrator + // DynamicRegistry contains all locally registered dynamic plugins (e.g csi + // plugins). + DynamicRegistry dynamicplugins.Registry + // DeviceManager is used to mount devices as well as lookup device // statistics DeviceManager devicemanager.Manager diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go new file mode 100644 index 000000000..5774c4548 --- /dev/null +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -0,0 +1,333 @@ +package taskrunner + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sync" + "time" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + ti "github.com/hashicorp/nomad/client/allocrunner/taskrunner/interfaces" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/drivers" +) + +// csiPluginSupervisorHook manages supervising plugins that are running as Nomad +// tasks. These plugins will be fingerprinted and it will manage connecting them +// to their requisite plugin manager. +// +// It provides a couple of things to a task running inside Nomad. These are: +// * A mount to the `plugin_mount_dir`, that will then be used by Nomad +// to connect to the nested plugin and handle volume mounts. +// * When the task has started, it starts a loop of attempting to connect to the +// plugin, to perform initial fingerprinting of the plugins capabilities before +// notifying the plugin manager of the plugin. 
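+//
+// Concretely (paths shown for illustration only), for a plugin of type
+// "monolith" with ID "csi-hostpath" the supervisor mounts
+// <client_state_dir>/csi/monolith/csi-hostpath into the task at
+// CSIPluginConfig.MountDir with bidirectional propagation, and then probes
+// the socket (structs.CSISocketName) the plugin is expected to create inside
+// that directory.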
+type csiPluginSupervisorHook struct { + logger hclog.Logger + alloc *structs.Allocation + task *structs.Task + runner *TaskRunner + mountPoint string + + // eventEmitter is used to emit events to the task + eventEmitter ti.EventEmitter + + shutdownCtx context.Context + shutdownCancelFn context.CancelFunc + + running bool + runningLock sync.Mutex + + // previousHealthstate is used by the supervisor goroutine to track historic + // health states for gating task events. + previousHealthState bool +} + +// The plugin supervisor uses the PrestartHook mechanism to setup the requisite +// mount points and configuration for the task that exposes a CSI plugin. +var _ interfaces.TaskPrestartHook = &csiPluginSupervisorHook{} + +// The plugin supervisor uses the PoststartHook mechanism to start polling the +// plugin for readiness and supported functionality before registering the +// plugin with the catalog. +var _ interfaces.TaskPoststartHook = &csiPluginSupervisorHook{} + +// The plugin supervisor uses the StopHook mechanism to deregister the plugin +// with the catalog and to ensure any mounts are cleaned up. +var _ interfaces.TaskStopHook = &csiPluginSupervisorHook{} + +func newCSIPluginSupervisorHook(csiRootDir string, eventEmitter ti.EventEmitter, runner *TaskRunner, logger hclog.Logger) *csiPluginSupervisorHook { + task := runner.Task() + pluginRoot := filepath.Join(csiRootDir, string(task.CSIPluginConfig.Type), task.CSIPluginConfig.ID) + + shutdownCtx, cancelFn := context.WithCancel(context.Background()) + + hook := &csiPluginSupervisorHook{ + alloc: runner.Alloc(), + runner: runner, + logger: logger, + task: task, + mountPoint: pluginRoot, + shutdownCtx: shutdownCtx, + shutdownCancelFn: cancelFn, + eventEmitter: eventEmitter, + } + + return hook +} + +func (*csiPluginSupervisorHook) Name() string { + return "csi_plugin_supervisor" +} + +// Prestart is called before the task is started including after every +// restart. This requires that the mount paths for a plugin be idempotent, +// despite us not knowing the name of the plugin ahead of time. +// Because of this, we use the allocid_taskname as the unique identifier for a +// plugin on the filesystem. +func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, + req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + // Create the mount directory that the container will access if it doesn't + // already exist. Default to only user access. + if err := os.MkdirAll(h.mountPoint, 0700); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create mount point: %v", err) + } + + configMount := &drivers.MountConfig{ + TaskPath: h.task.CSIPluginConfig.MountDir, + HostPath: h.mountPoint, + Readonly: false, + PropagationMode: "bidirectional", + } + + mounts := ensureMountpointInserted(h.runner.hookResources.getMounts(), configMount) + h.runner.hookResources.setMounts(mounts) + + resp.Done = true + return nil +} + +// Poststart is called after the task has started. Poststart is not +// called if the allocation is terminal. +// +// The context is cancelled if the task is killed. +func (h *csiPluginSupervisorHook) Poststart(_ context.Context, _ *interfaces.TaskPoststartRequest, _ *interfaces.TaskPoststartResponse) error { + // If we're already running the supervisor routine, then we don't need to try + // and restart it here as it only terminates on `Stop` hooks. 
+ h.runningLock.Lock() + if h.running { + h.runningLock.Unlock() + return nil + } + h.runningLock.Unlock() + + go h.ensureSupervisorLoop(h.shutdownCtx) + return nil +} + +// ensureSupervisorLoop should be called in a goroutine. It will terminate when +// the passed in context is terminated. +// +// The supervisor works by: +// - Initially waiting for the plugin to become available. This loop is expensive +// and may do things like create new gRPC Clients on every iteration. +// - After receiving an initial healthy status, it will inform the plugin catalog +// of the plugin, registering it with the plugins fingerprinted capabilities. +// - We then perform a more lightweight check, simply probing the plugin on a less +// frequent interval to ensure it is still alive, emitting task events when this +// status changes. +// +// Deeper fingerprinting of the plugin is implemented by the csimanager. +func (h *csiPluginSupervisorHook) ensureSupervisorLoop(ctx context.Context) { + h.runningLock.Lock() + if h.running == true { + h.runningLock.Unlock() + return + } + h.running = true + h.runningLock.Unlock() + + defer func() { + h.runningLock.Lock() + h.running = false + h.runningLock.Unlock() + }() + + socketPath := filepath.Join(h.mountPoint, structs.CSISocketName) + t := time.NewTimer(0) + + // Step 1: Wait for the plugin to initially become available. +WAITFORREADY: + for { + select { + case <-ctx.Done(): + return + case <-t.C: + pluginHealthy, err := h.supervisorLoopOnce(ctx, socketPath) + if err != nil || !pluginHealthy { + h.logger.Info("CSI Plugin not ready", "error", err) + + // Plugin is not yet returning healthy, because we want to optimise for + // quickly bringing a plugin online, we use a short timeout here. + // TODO(dani): Test with more plugins and adjust. + t.Reset(5 * time.Second) + continue + } + + // Mark the plugin as healthy in a task event + h.previousHealthState = pluginHealthy + event := structs.NewTaskEvent(structs.TaskPluginHealthy) + event.SetMessage(fmt.Sprintf("plugin: %s", h.task.CSIPluginConfig.ID)) + h.eventEmitter.EmitEvent(event) + + break WAITFORREADY + } + } + + // Step 2: Register the plugin with the catalog. + deregisterPluginFn, err := h.registerPlugin(socketPath) + if err != nil { + h.logger.Error("CSI Plugin registration failed", "error", err) + event := structs.NewTaskEvent(structs.TaskPluginUnhealthy) + event.SetMessage(fmt.Sprintf("failed to register plugin: %s, reason: %v", h.task.CSIPluginConfig.ID, err)) + h.eventEmitter.EmitEvent(event) + } + + // Step 3: Start the lightweight supervisor loop. + t.Reset(0) + for { + select { + case <-ctx.Done(): + // De-register plugins on task shutdown + deregisterPluginFn() + return + case <-t.C: + pluginHealthy, err := h.supervisorLoopOnce(ctx, socketPath) + if err != nil { + h.logger.Error("CSI Plugin fingerprinting failed", "error", err) + } + + // The plugin has transitioned to a healthy state. Emit an event. + if !h.previousHealthState && pluginHealthy { + event := structs.NewTaskEvent(structs.TaskPluginHealthy) + event.SetMessage(fmt.Sprintf("plugin: %s", h.task.CSIPluginConfig.ID)) + h.eventEmitter.EmitEvent(event) + } + + // The plugin has transitioned to an unhealthy state. Emit an event. 
+ if h.previousHealthState && !pluginHealthy { + event := structs.NewTaskEvent(structs.TaskPluginUnhealthy) + if err != nil { + event.SetMessage(fmt.Sprintf("error: %v", err)) + } else { + event.SetMessage("Unknown Reason") + } + h.eventEmitter.EmitEvent(event) + } + + h.previousHealthState = pluginHealthy + + // This loop is informational and in some plugins this may be expensive to + // validate. We use a longer timeout (30s) to avoid causing undue work. + t.Reset(30 * time.Second) + } + } +} + +func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), error) { + mkInfoFn := func(pluginType string) *dynamicplugins.PluginInfo { + return &dynamicplugins.PluginInfo{ + Type: pluginType, + Name: h.task.CSIPluginConfig.ID, + Version: "1.0.0", + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{ + SocketPath: socketPath, + }, + } + } + + registrations := []*dynamicplugins.PluginInfo{} + + switch h.task.CSIPluginConfig.Type { + case structs.CSIPluginTypeController: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSIController)) + case structs.CSIPluginTypeNode: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSINode)) + case structs.CSIPluginTypeMonolith: + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSIController)) + registrations = append(registrations, mkInfoFn(dynamicplugins.PluginTypeCSINode)) + } + + deregistrationFns := []func(){} + + for _, reg := range registrations { + if err := h.runner.dynamicRegistry.RegisterPlugin(reg); err != nil { + for _, fn := range deregistrationFns { + fn() + } + return nil, err + } + + deregistrationFns = append(deregistrationFns, func() { + err := h.runner.dynamicRegistry.DeregisterPlugin(reg.Type, reg.Name) + if err != nil { + h.logger.Error("failed to deregister csi plugin", "name", reg.Name, "type", reg.Type, "error", err) + } + }) + } + + return func() { + for _, fn := range deregistrationFns { + fn() + } + }, nil +} + +func (h *csiPluginSupervisorHook) supervisorLoopOnce(ctx context.Context, socketPath string) (bool, error) { + _, err := os.Stat(socketPath) + if err != nil { + return false, fmt.Errorf("failed to stat socket: %v", err) + } + + client, err := csi.NewClient(socketPath) + defer client.Close() + if err != nil { + return false, fmt.Errorf("failed to create csi client: %v", err) + } + + healthy, err := client.PluginProbe(ctx) + if err != nil { + return false, fmt.Errorf("failed to probe plugin: %v", err) + } + + return healthy, nil +} + +// Stop is called after the task has exited and will not be started +// again. It is the only hook guaranteed to be executed whenever +// TaskRunner.Run is called (and not gracefully shutting down). +// Therefore it may be called even when prestart and the other hooks +// have not. +// +// Stop hooks must be idempotent. The context is cancelled prematurely if the +// task is killed. 
+func (h *csiPluginSupervisorHook) Stop(_ context.Context, req *interfaces.TaskStopRequest, _ *interfaces.TaskStopResponse) error { + h.shutdownCancelFn() + return nil +} + +func ensureMountpointInserted(mounts []*drivers.MountConfig, mount *drivers.MountConfig) []*drivers.MountConfig { + for _, mnt := range mounts { + if mnt.IsEqual(mount) { + return mounts + } + } + + mounts = append(mounts, mount) + return mounts +} diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index a24b634e5..9982db96b 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" @@ -194,6 +195,9 @@ type TaskRunner struct { // handlers driverManager drivermanager.Manager + // dynamicRegistry is where dynamic plugins should be registered. + dynamicRegistry dynamicplugins.Registry + // maxEvents is the capacity of the TaskEvents on the TaskState. // Defaults to defaultMaxEvents but overrideable for testing. maxEvents int @@ -227,6 +231,9 @@ type Config struct { // ConsulSI is the client to use for managing Consul SI tokens ConsulSI consul.ServiceIdentityAPI + // DynamicRegistry is where dynamic plugins should be registered. + DynamicRegistry dynamicplugins.Registry + // Vault is the client to use to derive and renew Vault tokens Vault vaultclient.VaultClient @@ -285,6 +292,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { taskName: config.Task.Name, taskLeader: config.Task.Leader, envBuilder: envBuilder, + dynamicRegistry: config.DynamicRegistry, consulClient: config.Consul, siClient: config.ConsulSI, vaultClient: config.Vault, diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index 549b8316e..470ecd2db 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -3,6 +3,7 @@ package taskrunner import ( "context" "fmt" + "path/filepath" "sync" "time" @@ -69,6 +70,11 @@ func (tr *TaskRunner) initHooks() { newDeviceHook(tr.devicemanager, hookLogger), } + // If the task has a CSI stanza, add the hook. 
+ if task.CSIPluginConfig != nil { + tr.runnerHooks = append(tr.runnerHooks, newCSIPluginSupervisorHook(filepath.Join(tr.clientConfig.StateDir, "csi"), tr, tr, hookLogger)) + } + // If Vault is enabled, add the hook if task.Vault != nil { tr.runnerHooks = append(tr.runnerHooks, newVaultHook(&vaultHookConfig{ diff --git a/client/client.go b/client/client.go index 6996875f4..aa9ecbf97 100644 --- a/client/client.go +++ b/client/client.go @@ -26,8 +26,10 @@ import ( "github.com/hashicorp/nomad/client/config" consulApi "github.com/hashicorp/nomad/client/consul" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/fingerprint" "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/client/servers" "github.com/hashicorp/nomad/client/state" @@ -42,6 +44,7 @@ import ( "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" nconfig "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/drivers" vaultapi "github.com/hashicorp/vault/api" @@ -258,6 +261,9 @@ type Client struct { // pluginManagers is the set of PluginManagers registered by the client pluginManagers *pluginmanager.PluginGroup + // csimanager is responsible for managing csi plugins. + csimanager pluginmanager.PluginManager + // devicemanger is responsible for managing device plugins. devicemanager devicemanager.Manager @@ -279,6 +285,10 @@ type Client struct { // successfully run once. serversContactedCh chan struct{} serversContactedOnce sync.Once + + // dynamicRegistry provides access to plugins that are dynamically registered + // with a nomad client. Currently only used for CSI. 
+ dynamicRegistry dynamicplugins.Registry } var ( @@ -331,11 +341,20 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic invalidAllocs: make(map[string]struct{}), serversContactedCh: make(chan struct{}), serversContactedOnce: sync.Once{}, + dynamicRegistry: dynamicplugins.NewRegistry(map[string]dynamicplugins.PluginDispenser{ + dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath) + }, + dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath) + }, + }), } c.batchNodeUpdates = newBatchNodeUpdates( c.updateNodeFromDriver, c.updateNodeFromDevices, + c.updateNodeFromCSI, ) // Initialize the server manager @@ -383,6 +402,16 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic allowlistDrivers := cfg.ReadStringListToMap("driver.whitelist") blocklistDrivers := cfg.ReadStringListToMap("driver.blacklist") + // Setup the csi manager + csiConfig := &csimanager.Config{ + Logger: c.logger, + DynamicRegistry: c.dynamicRegistry, + UpdateNodeCSIInfoFunc: c.batchNodeUpdates.updateNodeFromCSI, + } + csiManager := csimanager.New(csiConfig) + c.csimanager = csiManager + c.pluginManagers.RegisterAndRun(csiManager) + // Setup the driver manager driverConfig := &drivermanager.Config{ Logger: c.logger, @@ -1054,6 +1083,7 @@ func (c *Client) restoreState() error { Vault: c.vaultClient, PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, + DynamicRegistry: c.dynamicRegistry, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, ServersContactedCh: c.serversContactedCh, @@ -1279,6 +1309,12 @@ func (c *Client) setupNode() error { if node.Drivers == nil { node.Drivers = make(map[string]*structs.DriverInfo) } + if node.CSIControllerPlugins == nil { + node.CSIControllerPlugins = make(map[string]*structs.CSIInfo) + } + if node.CSINodePlugins == nil { + node.CSINodePlugins = make(map[string]*structs.CSIInfo) + } if node.Meta == nil { node.Meta = make(map[string]string) } @@ -2310,6 +2346,7 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error DeviceStatsReporter: c, PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, + DynamicRegistry: c.dynamicRegistry, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, } diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go new file mode 100644 index 000000000..b1aa06130 --- /dev/null +++ b/client/dynamicplugins/registry.go @@ -0,0 +1,338 @@ +// dynamicplugins is a package that manages dynamic plugins in Nomad. +// It exposes a registry that allows for plugins to be registered/deregistered +// and also allows subscribers to receive real time updates of these events. +package dynamicplugins + +import ( + "context" + "errors" + "fmt" + "sync" +) + +const ( + PluginTypeCSIController = "csi-controller" + PluginTypeCSINode = "csi-node" +) + +// Registry is an interface that allows for the dynamic registration of plugins +// that are running as Nomad Tasks. 
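+//
+// As a rough usage sketch (the plugin name "csi-hostpath" and socket path are
+// hypothetical; error handling is elided), a consumer wires a dispenser per
+// plugin type, registers plugins as they appear, and later dispenses a
+// connected client by type and name:
+//
+//    reg := NewRegistry(map[string]PluginDispenser{
+//        PluginTypeCSINode: func(i *PluginInfo) (interface{}, error) {
+//            return csi.NewClient(i.ConnectionInfo.SocketPath)
+//        },
+//    })
+//    _ = reg.RegisterPlugin(&PluginInfo{
+//        Type:           PluginTypeCSINode,
+//        Name:           "csi-hostpath",
+//        ConnectionInfo: &PluginConnectionInfo{SocketPath: "/csi/csi.sock"},
+//    })
+//    raw, _ := reg.DispensePlugin(PluginTypeCSINode, "csi-hostpath")
+//    _ = raw // concrete type depends on the registered PluginDispenser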
+type Registry interface { + RegisterPlugin(info *PluginInfo) error + DeregisterPlugin(ptype, name string) error + + ListPlugins(ptype string) []*PluginInfo + DispensePlugin(ptype, name string) (interface{}, error) + + PluginsUpdatedCh(ctx context.Context, ptype string) <-chan *PluginUpdateEvent + + Shutdown() +} + +type PluginDispenser func(info *PluginInfo) (interface{}, error) + +// NewRegistry takes a map of `plugintype` to PluginDispenser functions +// that should be used to vend clients for plugins to be used. +func NewRegistry(dispensers map[string]PluginDispenser) Registry { + return &dynamicRegistry{ + plugins: make(map[string]map[string]*PluginInfo), + broadcasters: make(map[string]*pluginEventBroadcaster), + dispensers: dispensers, + } +} + +// PluginInfo is the metadata that is stored by the registry for a given plugin. +type PluginInfo struct { + Name string + Type string + Version string + + // ConnectionInfo should only be used externally during `RegisterPlugin` and + // may not be exposed in the future. + ConnectionInfo *PluginConnectionInfo +} + +// PluginConnectionInfo is the data required to connect to the plugin. +// note: We currently only support Unix Domain Sockets, but this may be expanded +// to support other connection modes in the future. +type PluginConnectionInfo struct { + // SocketPath is the path to the plugins api socket. + SocketPath string +} + +// EventType is the enum of events that will be emitted by a Registry's +// PluginsUpdatedCh. +type EventType string + +const ( + // EventTypeRegistered is emitted by the Registry when a new plugin has been + // registered. + EventTypeRegistered EventType = "registered" + // EventTypeDeregistered is emitted by the Registry when a plugin has been + // removed. + EventTypeDeregistered EventType = "deregistered" +) + +// PluginUpdateEvent is a struct that is sent over a PluginsUpdatedCh when +// plugins are added or removed from the registry. +type PluginUpdateEvent struct { + EventType EventType + Info *PluginInfo +} + +type dynamicRegistry struct { + plugins map[string]map[string]*PluginInfo + pluginsLock sync.RWMutex + + broadcasters map[string]*pluginEventBroadcaster + broadcastersLock sync.Mutex + + dispensers map[string]PluginDispenser +} + +func (d *dynamicRegistry) RegisterPlugin(info *PluginInfo) error { + if info.Type == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("Plugin.Type must not be empty") + } + + if info.ConnectionInfo == nil { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("Plugin.ConnectionInfo must not be nil") + } + + if info.Name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. 
+ return errors.New("Plugin.Name must not be empty") + } + + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + pmap, ok := d.plugins[info.Type] + if !ok { + pmap = make(map[string]*PluginInfo, 1) + d.plugins[info.Type] = pmap + } + + pmap[info.Name] = info + + broadcaster := d.broadcasterForPluginType(info.Type) + event := &PluginUpdateEvent{ + EventType: EventTypeRegistered, + Info: info, + } + broadcaster.broadcast(event) + + return nil +} + +func (d *dynamicRegistry) broadcasterForPluginType(ptype string) *pluginEventBroadcaster { + d.broadcastersLock.Lock() + defer d.broadcastersLock.Unlock() + + broadcaster, ok := d.broadcasters[ptype] + if !ok { + broadcaster = newPluginEventBroadcaster() + d.broadcasters[ptype] = broadcaster + } + + return broadcaster +} + +func (d *dynamicRegistry) DeregisterPlugin(ptype, name string) error { + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + if ptype == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("must specify plugin type to deregister") + } + if name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return errors.New("must specify plugin name to deregister") + } + + pmap, ok := d.plugins[ptype] + if !ok { + // If this occurs there's a bug in the registration handler. + return fmt.Errorf("no plugins registered for type: %s", ptype) + } + + info, ok := pmap[name] + if !ok { + // plugin already deregistered, don't send events or try re-deleting. + return nil + } + delete(pmap, name) + + broadcaster := d.broadcasterForPluginType(ptype) + event := &PluginUpdateEvent{ + EventType: EventTypeDeregistered, + Info: info, + } + broadcaster.broadcast(event) + + return nil +} + +func (d *dynamicRegistry) ListPlugins(ptype string) []*PluginInfo { + d.pluginsLock.RLock() + defer d.pluginsLock.RUnlock() + + pmap, ok := d.plugins[ptype] + if !ok { + return nil + } + + plugins := make([]*PluginInfo, 0, len(pmap)) + + for _, info := range pmap { + plugins = append(plugins, info) + } + + return plugins +} + +func (d *dynamicRegistry) DispensePlugin(ptype string, name string) (interface{}, error) { + d.pluginsLock.Lock() + defer d.pluginsLock.Unlock() + + if ptype == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, errors.New("must specify plugin type to deregister") + } + if name == "" { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, errors.New("must specify plugin name to deregister") + } + + dispenseFunc, ok := d.dispensers[ptype] + if !ok { + // This error shouldn't make it to a production cluster and is to aid + // developers during the development of new plugin types. + return nil, fmt.Errorf("no plugin dispenser found for type: %s", ptype) + } + + pmap, ok := d.plugins[ptype] + if !ok { + return nil, fmt.Errorf("no plugins registered for type: %s", ptype) + } + + info, ok := pmap[name] + if !ok { + return nil, fmt.Errorf("plugin %s for type %s not found", name, ptype) + } + + return dispenseFunc(info) +} + +// PluginsUpdatedCh returns a channel over which plugin events for the requested +// plugin type will be emitted. These events are strongly ordered and will never +// be dropped. 
+// +// The receiving channel _must not_ be closed before the provided context is +// cancelled. +func (d *dynamicRegistry) PluginsUpdatedCh(ctx context.Context, ptype string) <-chan *PluginUpdateEvent { + b := d.broadcasterForPluginType(ptype) + ch := b.subscribe() + go func() { + select { + case <-b.shutdownCh: + return + case <-ctx.Done(): + b.unsubscribe(ch) + } + }() + + return ch +} + +func (d *dynamicRegistry) Shutdown() { + for _, b := range d.broadcasters { + b.shutdown() + } +} + +type pluginEventBroadcaster struct { + stopCh chan struct{} + shutdownCh chan struct{} + publishCh chan *PluginUpdateEvent + + subscriptions map[chan *PluginUpdateEvent]struct{} + subscriptionsLock sync.RWMutex +} + +func newPluginEventBroadcaster() *pluginEventBroadcaster { + b := &pluginEventBroadcaster{ + stopCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + publishCh: make(chan *PluginUpdateEvent, 1), + subscriptions: make(map[chan *PluginUpdateEvent]struct{}), + } + go b.run() + return b +} + +func (p *pluginEventBroadcaster) run() { + for { + select { + case <-p.stopCh: + close(p.shutdownCh) + return + case msg := <-p.publishCh: + p.subscriptionsLock.RLock() + for msgCh := range p.subscriptions { + select { + case msgCh <- msg: + } + } + p.subscriptionsLock.RUnlock() + } + } +} + +func (p *pluginEventBroadcaster) shutdown() { + close(p.stopCh) + + // Wait for loop to exit before closing subscriptions + <-p.shutdownCh + + p.subscriptionsLock.Lock() + for sub := range p.subscriptions { + delete(p.subscriptions, sub) + close(sub) + } + p.subscriptionsLock.Unlock() +} + +func (p *pluginEventBroadcaster) broadcast(e *PluginUpdateEvent) { + p.publishCh <- e +} + +func (p *pluginEventBroadcaster) subscribe() chan *PluginUpdateEvent { + p.subscriptionsLock.Lock() + defer p.subscriptionsLock.Unlock() + + ch := make(chan *PluginUpdateEvent, 1) + p.subscriptions[ch] = struct{}{} + return ch +} + +func (p *pluginEventBroadcaster) unsubscribe(ch chan *PluginUpdateEvent) { + p.subscriptionsLock.Lock() + defer p.subscriptionsLock.Unlock() + + _, ok := p.subscriptions[ch] + if ok { + delete(p.subscriptions, ch) + close(ch) + } +} diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go new file mode 100644 index 000000000..a3feaaac5 --- /dev/null +++ b/client/dynamicplugins/registry_test.go @@ -0,0 +1,171 @@ +package dynamicplugins + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestPluginEventBroadcaster_SendsMessagesToAllClients(t *testing.T) { + t.Parallel() + b := newPluginEventBroadcaster() + defer close(b.stopCh) + var rcv1, rcv2 bool + + ch1 := b.subscribe() + ch2 := b.subscribe() + + listenFunc := func(ch chan *PluginUpdateEvent, updateBool *bool) { + select { + case <-ch: + *updateBool = true + } + } + + go listenFunc(ch1, &rcv1) + go listenFunc(ch2, &rcv2) + + b.broadcast(&PluginUpdateEvent{}) + + require.Eventually(t, func() bool { + return rcv1 == true && rcv2 == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { + t.Parallel() + + b := newPluginEventBroadcaster() + defer close(b.stopCh) + var rcv1 bool + + ch1 := b.subscribe() + + listenFunc := func(ch chan *PluginUpdateEvent, updateBool *bool) { + select { + case e := <-ch: + if e == nil { + *updateBool = true + } + } + } + + go listenFunc(ch1, &rcv1) + + b.unsubscribe(ch1) + + b.broadcast(&PluginUpdateEvent{}) + + require.Eventually(t, func() bool { + return rcv1 == true + }, 
1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { + t.Parallel() + r := NewRegistry(nil) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + ch := r.PluginsUpdatedCh(ctx, "csi") + receivedRegistrationEvent := false + + listenFunc := func(ch <-chan *PluginUpdateEvent, updateBool *bool) { + select { + case e := <-ch: + if e == nil { + return + } + + if e.EventType == EventTypeRegistered { + *updateBool = true + } + } + } + + go listenFunc(ch, &receivedRegistrationEvent) + + err := r.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + + require.NoError(t, err) + + require.Eventually(t, func() bool { + return receivedRegistrationEvent == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { + t.Parallel() + r := NewRegistry(nil) + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + + ch := r.PluginsUpdatedCh(ctx, "csi") + receivedDeregistrationEvent := false + + listenFunc := func(ch <-chan *PluginUpdateEvent, updateBool *bool) { + for { + select { + case e := <-ch: + if e == nil { + return + } + + if e.EventType == EventTypeDeregistered { + *updateBool = true + } + } + } + } + + go listenFunc(ch, &receivedDeregistrationEvent) + + err := r.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.DeregisterPlugin("csi", "my-plugin") + require.NoError(t, err) + + require.Eventually(t, func() bool { + return receivedDeregistrationEvent == true + }, 1*time.Second, 200*time.Millisecond) +} + +func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { + dispenseFn := func(i *PluginInfo) (interface{}, error) { + return struct{}{}, nil + } + + registry := NewRegistry(map[string]PluginDispenser{"csi": dispenseFn}) + + err := registry.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + result, err := registry.DispensePlugin("unknown-type", "unknown-name") + require.Nil(t, result) + require.EqualError(t, err, "no plugin dispenser found for type: unknown-type") + + result, err = registry.DispensePlugin("csi", "unknown-name") + require.Nil(t, result) + require.EqualError(t, err, "plugin unknown-name for type csi not found") + + result, err = registry.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) +} diff --git a/client/node_updater.go b/client/node_updater.go index 702cfe8c2..115150da5 100644 --- a/client/node_updater.go +++ b/client/node_updater.go @@ -7,6 +7,7 @@ import ( "time" "github.com/hashicorp/nomad/client/devicemanager" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" "github.com/hashicorp/nomad/nomad/structs" ) @@ -40,6 +41,23 @@ SEND_BATCH: c.configLock.Lock() defer c.configLock.Unlock() + // csi updates + var csiChanged bool + c.batchNodeUpdates.batchCSIUpdates(func(name string, info *structs.CSIInfo) { + if c.updateNodeFromCSIControllerLocked(name, info) { + if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + } + csiChanged = true + } + if c.updateNodeFromCSINodeLocked(name, info) { + if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { + 
c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + } + csiChanged = true + } + }) + // driver node updates var driverChanged bool c.batchNodeUpdates.batchDriverUpdates(func(driver string, info *structs.DriverInfo) { @@ -61,13 +79,128 @@ SEND_BATCH: }) // only update the node if changes occurred - if driverChanged || devicesChanged { + if driverChanged || devicesChanged || csiChanged { c.updateNodeLocked() } close(c.fpInitialized) } +// updateNodeFromCSI receives a CSIInfo struct for the plugin and updates the +// node accordingly +func (c *Client) updateNodeFromCSI(name string, info *structs.CSIInfo) { + c.configLock.Lock() + defer c.configLock.Unlock() + + changed := false + + if c.updateNodeFromCSIControllerLocked(name, info) { + if c.config.Node.CSIControllerPlugins[name].UpdateTime.IsZero() { + c.config.Node.CSIControllerPlugins[name].UpdateTime = time.Now() + } + changed = true + } + + if c.updateNodeFromCSINodeLocked(name, info) { + if c.config.Node.CSINodePlugins[name].UpdateTime.IsZero() { + c.config.Node.CSINodePlugins[name].UpdateTime = time.Now() + } + changed = true + } + + if changed { + c.updateNodeLocked() + } +} + +// updateNodeFromCSIControllerLocked makes the changes to the node from a csi +// update but does not send the update to the server. c.configLock must be held +// before calling this func. +// +// It is safe to call for all CSI Updates, but will only perform changes when +// a ControllerInfo field is present. +func (c *Client) updateNodeFromCSIControllerLocked(name string, info *structs.CSIInfo) bool { + var changed bool + if info.ControllerInfo == nil { + return false + } + i := info.Copy() + i.NodeInfo = nil + + oldController, hadController := c.config.Node.CSIControllerPlugins[name] + if !hadController { + // If the controller info has not yet been set, do that here + changed = true + c.config.Node.CSIControllerPlugins[name] = i + } else { + // The controller info has already been set, fix it up + if !oldController.Equal(i) { + c.config.Node.CSIControllerPlugins[name] = i + changed = true + } + + // If health state has changed, trigger node event + if oldController.Healthy != i.Healthy || oldController.HealthDescription != i.HealthDescription { + changed = true + if i.HealthDescription != "" { + event := &structs.NodeEvent{ + Subsystem: "CSI", + Message: i.HealthDescription, + Timestamp: time.Now(), + Details: map[string]string{"plugin": name, "type": "controller"}, + } + c.triggerNodeEvent(event) + } + } + } + + return changed +} + +// updateNodeFromCSINodeLocked makes the changes to the node from a csi +// update but does not send the update to the server. c.configLock must be hel +// before calling this func. +// +// It is safe to call for all CSI Updates, but will only perform changes when +// a NodeInfo field is present. 
+func (c *Client) updateNodeFromCSINodeLocked(name string, info *structs.CSIInfo) bool { + var changed bool + if info.NodeInfo == nil { + return false + } + i := info.Copy() + i.ControllerInfo = nil + + oldNode, hadNode := c.config.Node.CSINodePlugins[name] + if !hadNode { + // If the Node info has not yet been set, do that here + changed = true + c.config.Node.CSINodePlugins[name] = i + } else { + // The node info has already been set, fix it up + if !oldNode.Equal(info) { + c.config.Node.CSINodePlugins[name] = i + changed = true + } + + // If health state has changed, trigger node event + if oldNode.Healthy != i.Healthy || oldNode.HealthDescription != i.HealthDescription { + changed = true + if i.HealthDescription != "" { + event := &structs.NodeEvent{ + Subsystem: "CSI", + Message: i.HealthDescription, + Timestamp: time.Now(), + Details: map[string]string{"plugin": name, "type": "node"}, + } + c.triggerNodeEvent(event) + } + } + } + + return changed +} + // updateNodeFromDriver receives a DriverInfo struct for the driver and updates // the node accordingly func (c *Client) updateNodeFromDriver(name string, info *structs.DriverInfo) { @@ -187,20 +320,66 @@ type batchNodeUpdates struct { devicesBatched bool devicesCB devicemanager.UpdateNodeDevicesFn devicesMu sync.Mutex + + // access to csi fields must hold csiMu lock + csiNodePlugins map[string]*structs.CSIInfo + csiControllerPlugins map[string]*structs.CSIInfo + csiBatched bool + csiCB csimanager.UpdateNodeCSIInfoFunc + csiMu sync.Mutex } func newBatchNodeUpdates( driverCB drivermanager.UpdateNodeDriverInfoFn, - devicesCB devicemanager.UpdateNodeDevicesFn) *batchNodeUpdates { + devicesCB devicemanager.UpdateNodeDevicesFn, + csiCB csimanager.UpdateNodeCSIInfoFunc) *batchNodeUpdates { return &batchNodeUpdates{ - drivers: make(map[string]*structs.DriverInfo), - driverCB: driverCB, - devices: []*structs.NodeDeviceResource{}, - devicesCB: devicesCB, + drivers: make(map[string]*structs.DriverInfo), + driverCB: driverCB, + devices: []*structs.NodeDeviceResource{}, + devicesCB: devicesCB, + csiNodePlugins: make(map[string]*structs.CSIInfo), + csiControllerPlugins: make(map[string]*structs.CSIInfo), + csiCB: csiCB, } } +// updateNodeFromCSI implements csimanager.UpdateNodeCSIInfoFunc and is used in +// the csi manager to send csi fingerprints to the server. Currently it registers +// all plugins as both controller and node plugins. +// TODO: separate node and controller plugin handling. 
+func (b *batchNodeUpdates) updateNodeFromCSI(plugin string, info *structs.CSIInfo) { + b.csiMu.Lock() + defer b.csiMu.Unlock() + if b.csiBatched { + b.csiCB(plugin, info) + return + } + + b.csiNodePlugins[plugin] = info + b.csiControllerPlugins[plugin] = info +} + +// batchCSIUpdates sends all of the batched CSI updates by calling f for each +// plugin batched +func (b *batchNodeUpdates) batchCSIUpdates(f csimanager.UpdateNodeCSIInfoFunc) error { + b.csiMu.Lock() + defer b.csiMu.Unlock() + if b.csiBatched { + return fmt.Errorf("csi updates already batched") + } + + b.csiBatched = true + for plugin, info := range b.csiNodePlugins { + f(plugin, info) + } + for plugin, info := range b.csiControllerPlugins { + f(plugin, info) + } + return nil +} + // updateNodeFromDriver implements drivermanager.UpdateNodeDriverInfoFn and is // used in the driver manager to send driver fingerprints to func (b *batchNodeUpdates) updateNodeFromDriver(driver string, info *structs.DriverInfo) { diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go new file mode 100644 index 000000000..9de20ce8b --- /dev/null +++ b/client/pluginmanager/csimanager/instance.go @@ -0,0 +1,203 @@ +package csimanager + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +const managerFingerprintInterval = 30 * time.Second + +// instanceManager is used to manage the fingerprinting and supervision of a +// single CSI Plugin. +type instanceManager struct { + info *dynamicplugins.PluginInfo + logger hclog.Logger + + updater UpdateNodeCSIInfoFunc + + shutdownCtx context.Context + shutdownCtxCancelFn context.CancelFunc + shutdownCh chan struct{} + + fingerprintNode bool + fingerprintController bool + + client csi.CSIPlugin +} + +func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *dynamicplugins.PluginInfo) *instanceManager { + ctx, cancelFn := context.WithCancel(context.Background()) + + return &instanceManager{ + logger: logger.Named(p.Name), + info: p, + updater: updater, + + fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, + fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + + shutdownCtx: ctx, + shutdownCtxCancelFn: cancelFn, + shutdownCh: make(chan struct{}), + } +} + +func (i *instanceManager) run() { + c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath) + if err != nil { + i.logger.Error("failed to setup instance manager client", "error", err) + close(i.shutdownCh) + return + } + i.client = c + + go i.runLoop() +} + +func (i *instanceManager) requestCtxWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(i.shutdownCtx, timeout) +} + +func (i *instanceManager) runLoop() { + // basicInfo holds a cache of data that should not change within a CSI plugin. + // This allows us to minimize the number of requests we make to plugins on each + // run of the fingerprinter, and reduces the chances of performing overly + // expensive actions repeatedly, and improves stability of data through + // transient failures. 
+ var basicInfo *structs.CSIInfo + + timer := time.NewTimer(0) + for { + select { + case <-i.shutdownCtx.Done(): + if i.client != nil { + i.client.Close() + i.client = nil + } + close(i.shutdownCh) + return + case <-timer.C: + ctx, cancelFn := i.requestCtxWithTimeout(managerFingerprintInterval) + + if basicInfo == nil { + info, err := i.buildBasicFingerprint(ctx) + if err != nil { + // If we receive a fingerprinting error, update the stats with as much + // info as possible and wait for the next fingerprint interval. + info.HealthDescription = fmt.Sprintf("failed initial fingerprint with err: %v", err) + cancelFn() + i.updater(i.info.Name, basicInfo) + timer.Reset(managerFingerprintInterval) + continue + } + + // If fingerprinting succeeded, we don't need to repopulate the basic + // info and we can stop here. + basicInfo = info + } + + info := basicInfo.Copy() + var fp *structs.CSIInfo + var err error + + if i.fingerprintNode { + fp, err = i.buildNodeFingerprint(ctx, info) + } else if i.fingerprintController { + fp, err = i.buildControllerFingerprint(ctx, info) + } + + if err != nil { + info.Healthy = false + info.HealthDescription = fmt.Sprintf("failed fingerprinting with error: %v", err) + } else { + info = fp + } + + cancelFn() + i.updater(i.info.Name, info) + timer.Reset(managerFingerprintInterval) + } + } +} + +func (i *instanceManager) buildControllerFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := i.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + return fp, nil +} + +func (i *instanceManager) buildNodeFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := i.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + return fp, nil +} + +func structCSITopologyFromCSITopology(a *csi.Topology) *structs.CSITopology { + if a == nil { + return nil + } + + return &structs.CSITopology{ + Segments: helper.CopyMapStringString(a.Segments), + } +} + +func (i *instanceManager) buildBasicFingerprint(ctx context.Context) (*structs.CSIInfo, error) { + info := &structs.CSIInfo{ + PluginID: i.info.Name, + Healthy: false, + HealthDescription: "initial fingerprint not completed", + } + + if i.fingerprintNode { + info.NodeInfo = &structs.CSINodeInfo{} + } + if i.fingerprintController { + info.ControllerInfo = &structs.CSIControllerInfo{} + } + + capabilities, err := i.client.PluginGetCapabilities(ctx) + if err != nil { + return info, err + } + + info.RequiresControllerPlugin = capabilities.HasControllerService() + info.RequiresTopologies = capabilities.HasToplogies() + + if i.fingerprintNode { + nodeInfo, err := i.client.NodeGetInfo(ctx) + if err != nil { + return info, err + } + + info.NodeInfo.ID = nodeInfo.NodeID + info.NodeInfo.MaxVolumes = nodeInfo.MaxVolumes + info.NodeInfo.AccessibleTopology = structCSITopologyFromCSITopology(nodeInfo.AccessibleTopology) + } + + return info, nil +} + +func (i *instanceManager) shutdown() { + i.shutdownCtxCancelFn() + <-i.shutdownCh +} diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go new file mode 100644 index 000000000..ca30a321e --- /dev/null +++ b/client/pluginmanager/csimanager/instance_test.go @@ -0,0 +1,159 @@ +package csimanager + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/helper/testlog" + 
"github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" +) + +func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) { + tp := &fake.Client{} + + logger := testlog.HCLogger(t) + pinfo := &dynamicplugins.PluginInfo{ + Name: "test-plugin", + } + + return tp, &instanceManager{ + logger: logger, + info: pinfo, + client: tp, + fingerprintNode: true, + } +} + +func TestBuildBasicFingerprint_Node(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.PluginCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + NodeInfo *csi.NodeGetInfoResponse + NodeInfoErr error + NodeInfoCallCount int64 + + ExpectedCSIInfo *structs.CSIInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: nil, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + }, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: csi.NewTestPluginCapabilitySet(true, false), + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &csi.Topology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + + RequiresTopologies: true, + + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + }, + }, + { + Name: "PluginGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + NodeInfoCallCount: 0, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: errors.New("request failed"), + }, + { + Name: "NodeGetInfo Failed", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfoErr: errors.New("request failed"), + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextPluginGetCapabilitiesResponse = test.Capabilities + client.NextPluginGetCapabilitiesErr = test.CapabilitiesErr + + client.NextNodeGetInfoResponse = test.NodeInfo + client.NextNodeGetInfoErr = test.NodeInfoErr + + info, err := im.buildBasicFingerprint(context.TODO()) + + require.Equal(t, test.ExpectedCSIInfo, info) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.PluginGetCapabilitiesCallCount) + require.Equal(t, test.NodeInfoCallCount, client.NodeGetInfoCallCount) + }) + } +} diff --git 
a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go new file mode 100644 index 000000000..ebcbcc89f --- /dev/null +++ b/client/pluginmanager/csimanager/manager.go @@ -0,0 +1,153 @@ +package csimanager + +import ( + "context" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/nomad/structs" +) + +// defaultPluginResyncPeriod is the time interval used to do a full resync +// against the dynamicplugins, to account for missed updates. +const defaultPluginResyncPeriod = 30 * time.Second + +// UpdateNodeCSIInfoFunc is the callback used to update the node from +// fingerprinting +type UpdateNodeCSIInfoFunc func(string, *structs.CSIInfo) + +type Config struct { + Logger hclog.Logger + DynamicRegistry dynamicplugins.Registry + UpdateNodeCSIInfoFunc UpdateNodeCSIInfoFunc + PluginResyncPeriod time.Duration +} + +// New returns a new PluginManager that will handle managing CSI plugins from +// the dynamicRegistry from the provided Config. +func New(config *Config) pluginmanager.PluginManager { + // Use a dedicated internal context for managing plugin shutdown. + ctx, cancelFn := context.WithCancel(context.Background()) + + if config.PluginResyncPeriod == 0 { + config.PluginResyncPeriod = defaultPluginResyncPeriod + } + + return &csiManager{ + logger: config.Logger, + registry: config.DynamicRegistry, + instances: make(map[string]map[string]*instanceManager), + + updateNodeCSIInfoFunc: config.UpdateNodeCSIInfoFunc, + pluginResyncPeriod: config.PluginResyncPeriod, + + shutdownCtx: ctx, + shutdownCtxCancelFn: cancelFn, + shutdownCh: make(chan struct{}), + } +} + +type csiManager struct { + // instances should only be accessed from the run() goroutine and the shutdown + // fn. It is a map of PluginType : [PluginName : instanceManager] + instances map[string]map[string]*instanceManager + + registry dynamicplugins.Registry + logger hclog.Logger + pluginResyncPeriod time.Duration + + updateNodeCSIInfoFunc UpdateNodeCSIInfoFunc + + shutdownCtx context.Context + shutdownCtxCancelFn context.CancelFunc + shutdownCh chan struct{} +} + +// Run starts a plugin manager and should return early +func (c *csiManager) Run() { + go c.runLoop() +} + +func (c *csiManager) runLoop() { + // TODO: Subscribe to the events channel from the registry to receive dynamic + // updates without a full resync + timer := time.NewTimer(0) + for { + select { + case <-c.shutdownCtx.Done(): + close(c.shutdownCh) + return + case <-timer.C: + c.resyncPluginsFromRegistry("csi-controller") + c.resyncPluginsFromRegistry("csi-node") + timer.Reset(c.pluginResyncPeriod) + } + } +} + +// resyncPluginsFromRegistry does a full sync of the running instance managers +// against those in the registry. Eventually we should primarily be using +// update events from the registry, but this is an ok fallback for now. +func (c *csiManager) resyncPluginsFromRegistry(ptype string) { + plugins := c.registry.ListPlugins(ptype) + seen := make(map[string]struct{}, len(plugins)) + + pluginMap, ok := c.instances[ptype] + if !ok { + pluginMap = make(map[string]*instanceManager) + c.instances[ptype] = pluginMap + } + + // For every plugin in the registry, ensure that we have an existing plugin + // running. Also build the map of valid plugin names. 
+ for _, plugin := range plugins { + seen[plugin.Name] = struct{}{} + if _, ok := pluginMap[plugin.Name]; !ok { + c.logger.Debug("detected new CSI plugin", "name", plugin.Name, "type", ptype) + mgr := newInstanceManager(c.logger, c.updateNodeCSIInfoFunc, plugin) + pluginMap[plugin.Name] = mgr + mgr.run() + } + } + + // For every instance manager, if we did not find it during the plugin + // iterator, shut it down and remove it from the table. + for name, mgr := range pluginMap { + if _, ok := seen[name]; !ok { + c.logger.Info("shutting down CSI plugin", "name", name, "type", ptype) + mgr.shutdown() + delete(pluginMap, name) + } + } +} + +// Shutdown should gracefully shutdown all plugins managed by the manager. +// It must block until shutdown is complete +func (c *csiManager) Shutdown() { + // Shut down the run loop + c.shutdownCtxCancelFn() + + // Wait for plugin manager shutdown to complete + <-c.shutdownCh + + // Shutdown all the instance managers in parallel + var wg sync.WaitGroup + for _, pluginMap := range c.instances { + for _, mgr := range pluginMap { + wg.Add(1) + go func(mgr *instanceManager) { + mgr.shutdown() + wg.Done() + }(mgr) + } + } + wg.Wait() +} + +// PluginType is the type of plugin which the manager manages +func (c *csiManager) PluginType() string { + return "csi" +} diff --git a/client/pluginmanager/csimanager/manager_test.go b/client/pluginmanager/csimanager/manager_test.go new file mode 100644 index 000000000..408168ca2 --- /dev/null +++ b/client/pluginmanager/csimanager/manager_test.go @@ -0,0 +1,111 @@ +package csimanager + +import ( + "testing" + "time" + + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" +) + +var _ pluginmanager.PluginManager = (*csiManager)(nil) + +var fakePlugin = &dynamicplugins.PluginInfo{ + Name: "my-plugin", + Type: "csi-controller", + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, +} + +func setupRegistry() dynamicplugins.Registry { + return dynamicplugins.NewRegistry( + map[string]dynamicplugins.PluginDispenser{ + "csi-controller": func(*dynamicplugins.PluginInfo) (interface{}, error) { + return nil, nil + }, + }) +} + +func TestCSIManager_Setup_Shutdown(t *testing.T) { + r := setupRegistry() + defer r.Shutdown() + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: r, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + } + pm := New(cfg).(*csiManager) + pm.Run() + pm.Shutdown() +} + +func TestCSIManager_RegisterPlugin(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + pmap, ok := pm.instances[fakePlugin.Type] + if !ok { + return false + } + + _, ok = pmap[fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) +} + +func TestCSIManager_DeregisterPlugin(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + 
PluginResyncPeriod: 500 * time.Millisecond, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + err = registry.DeregisterPlugin(fakePlugin.Type, fakePlugin.Name) + require.Nil(t, err) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return !ok + }, 5*time.Second, 10*time.Millisecond) +} diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index b394ed357..0483c018f 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -812,6 +812,7 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { structsTask.Kind = structs.TaskKind(apiTask.Kind) structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints) structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities) + structsTask.CSIPluginConfig = ApiCSIPluginConfigToStructsCSIPluginConfig(apiTask.CSIPluginConfig) if l := len(apiTask.VolumeMounts); l != 0 { structsTask.VolumeMounts = make([]*structs.VolumeMount, l) @@ -933,6 +934,18 @@ func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) { } } +func ApiCSIPluginConfigToStructsCSIPluginConfig(apiConfig *api.TaskCSIPluginConfig) *structs.TaskCSIPluginConfig { + if apiConfig == nil { + return nil + } + + sc := &structs.TaskCSIPluginConfig{} + sc.ID = apiConfig.ID + sc.Type = structs.CSIPluginType(apiConfig.Type) + sc.MountDir = apiConfig.MountDir + return sc +} + func ApiResourcesToStructs(in *api.Resources) *structs.Resources { if in == nil { return nil diff --git a/jobspec/parse_task.go b/jobspec/parse_task.go index dbd20abdd..a59c88331 100644 --- a/jobspec/parse_task.go +++ b/jobspec/parse_task.go @@ -74,6 +74,7 @@ func parseTask(item *ast.ObjectItem) (*api.Task, error) { "kill_signal", "kind", "volume_mount", + "csi_plugin", } if err := helper.CheckHCLKeys(listVal, valid); err != nil { return nil, err @@ -97,6 +98,7 @@ func parseTask(item *ast.ObjectItem) (*api.Task, error) { delete(m, "template") delete(m, "vault") delete(m, "volume_mount") + delete(m, "csi_plugin") // Build the task var t api.Task @@ -135,6 +137,25 @@ func parseTask(item *ast.ObjectItem) (*api.Task, error) { t.Services = services } + if o := listVal.Filter("csi_plugin"); len(o.Items) > 0 { + if len(o.Items) != 1 { + return nil, fmt.Errorf("csi_plugin -> Expected single stanza, got %d", len(o.Items)) + } + i := o.Elem().Items[0] + + var m map[string]interface{} + if err := hcl.DecodeObject(&m, i.Val); err != nil { + return nil, err + } + + var cfg api.TaskCSIPluginConfig + if err := mapstructure.WeakDecode(m, &cfg); err != nil { + return nil, err + } + + t.CSIPluginConfig = &cfg + } + // If we have config, then parse that if o := listVal.Filter("config"); len(o.Items) > 0 { for _, o := range o.Elem().Items { diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 13639a1b2..ed66f05ae 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -569,6 +569,30 @@ func TestParse(t *testing.T) { }, false, }, + { + "csi-plugin.hcl", + &api.Job{ + ID: helper.StringToPtr("binstore-storagelocker"), + Name: helper.StringToPtr("binstore-storagelocker"), + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("binsl"), + Tasks: []*api.Task{ + { + Name: "binstore", + Driver: "docker", + 
CSIPluginConfig: &api.TaskCSIPluginConfig{ + ID: "org.hashicorp.csi", + Type: api.CSIPluginTypeMonolith, + MountDir: "/csi/test", + }, + }, + }, + }, + }, + }, + false, + }, { "service-check-initial-status.hcl", &api.Job{ diff --git a/jobspec/test-fixtures/csi-plugin.hcl b/jobspec/test-fixtures/csi-plugin.hcl new file mode 100644 index 000000000..b879da184 --- /dev/null +++ b/jobspec/test-fixtures/csi-plugin.hcl @@ -0,0 +1,13 @@ +job "binstore-storagelocker" { + group "binsl" { + task "binstore" { + driver = "docker" + + csi_plugin { + id = "org.hashicorp.csi" + type = "monolith" + mount_dir = "/csi/test" + } + } + } +} diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go new file mode 100644 index 000000000..b00b2fa72 --- /dev/null +++ b/nomad/structs/csi.go @@ -0,0 +1,68 @@ +package structs + +// CSISocketName is the filename that Nomad expects plugins to create inside the +// PluginMountDir. +const CSISocketName = "csi.sock" + +// CSIIntermediaryDirname is the name of the directory inside the PluginMountDir +// where Nomad will expect plugins to create intermediary mounts for volumes. +const CSIIntermediaryDirname = "volumes" + +// CSIPluginType is an enum string that encapsulates the valid options for a +// CSIPlugin stanza's Type. These modes will allow the plugin to be used in +// different ways by the client. +type CSIPluginType string + +const ( + // CSIPluginTypeNode indicates that Nomad should only use the plugin for + // performing Node RPCs against the provided plugin. + CSIPluginTypeNode CSIPluginType = "node" + + // CSIPluginTypeController indicates that Nomad should only use the plugin for + // performing Controller RPCs against the provided plugin. + CSIPluginTypeController CSIPluginType = "controller" + + // CSIPluginTypeMonolith indicates that Nomad can use the provided plugin for + // both controller and node rpcs. + CSIPluginTypeMonolith CSIPluginType = "monolith" +) + +// CSIPluginTypeIsValid validates the given CSIPluginType string and returns +// true only when a correct plugin type is specified. +func CSIPluginTypeIsValid(pt CSIPluginType) bool { + switch pt { + case CSIPluginTypeNode, CSIPluginTypeController, CSIPluginTypeMonolith: + return true + default: + return false + } +} + +// TaskCSIPluginConfig contains the data that is required to setup a task as a +// CSI plugin. This will be used by the csi_plugin_supervisor_hook to configure +// mounts for the plugin and initiate the connection to the plugin catalog. +type TaskCSIPluginConfig struct { + // ID is the identifier of the plugin. + // Ideally this should be the FQDN of the plugin. + ID string + + // Type instructs Nomad on how to handle processing a plugin + Type CSIPluginType + + // MountDir is the destination that nomad should mount in its CSI + // directory for the plugin. It will then expect a file called CSISocketName + // to be created by the plugin, and will provide references into + // "MountDir/CSIIntermediaryDirname/{VolumeName}/{AllocID} for mounts. + MountDir string +} + +func (t *TaskCSIPluginConfig) Copy() *TaskCSIPluginConfig { + if t == nil { + return nil + } + + nt := new(TaskCSIPluginConfig) + *nt = *t + + return nt +} diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 76758fb8e..7143c42e2 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -1,11 +1,177 @@ package structs import ( + "reflect" "time" "github.com/hashicorp/nomad/helper" ) +// CSITopology is a map of topological domains to topological segments. 
+// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// +// According to CSI, there are a few requirements for the keys within this map: +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +// +// However, Nomad applies lighter restrictions to these, as they are already +// only referenced by plugin within the scheduler and as such collisions and +// related concerns are less of an issue. We may implement these restrictions +// in the future. +type CSITopology struct { + Segments map[string]string +} + +func (t *CSITopology) Copy() *CSITopology { + if t == nil { + return nil + } + + return &CSITopology{ + Segments: helper.CopyMapStringString(t.Segments), + } +} + +// CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Node API. +type CSINodeInfo struct { + // ID is the identity of a given nomad client as observed by the storage + // provider. + ID string + + // MaxVolumes is the maximum number of volumes that can be attached to the + // current host via this provider. + // If 0 then unlimited volumes may be attached. + MaxVolumes int64 + + // AccessibleTopology specifies where (regions, zones, racks, etc.) the node is + // accessible from within the storage provider. + // + // A plugin that returns this field MUST also set the `RequiresTopologies` + // property. + // + // This field is OPTIONAL. If it is not specified, then we assume that the + // the node is not subject to any topological constraint, and MAY + // schedule workloads that reference any volume V, such that there are + // no topological constraints declared for V. + // + // Example 1: + // accessible_topology = + // {"region": "R1", "zone": "Z2"} + // Indicates the node exists within the "region" "R1" and the "zone" + // "Z2" within the storage provider. + AccessibleTopology *CSITopology +} + +func (n *CSINodeInfo) Copy() *CSINodeInfo { + if n == nil { + return nil + } + + nc := new(CSINodeInfo) + *nc = *n + nc.AccessibleTopology = n.AccessibleTopology.Copy() + + return nc +} + +// CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Controller API. 
+type CSIControllerInfo struct { + // Currently empty +} + +func (c *CSIControllerInfo) Copy() *CSIControllerInfo { + if c == nil { + return nil + } + + nc := new(CSIControllerInfo) + *nc = *c + + return nc +} + +// CSIInfo is the current state of a single CSI Plugin. This is updated regularly +// as plugin health changes on the node. +type CSIInfo struct { + PluginID string + Healthy bool + HealthDescription string + UpdateTime time.Time + + // RequiresControllerPlugin is set when the CSI Plugin returns the + // CONTROLLER_SERVICE capability. When this is true, the volumes should not be + // scheduled on this client until a matching controller plugin is available. + RequiresControllerPlugin bool + + // RequiresTopologies is set when the CSI Plugin returns the + // VOLUME_ACCESSIBLE_CONSTRAINTS capability. When this is true, we must + // respect the Volume and Node Topology information. + RequiresTopologies bool + + // CSI Specific metadata + ControllerInfo *CSIControllerInfo `json:",omitempty"` + NodeInfo *CSINodeInfo `json:",omitempty"` +} + +func (c *CSIInfo) Copy() *CSIInfo { + if c == nil { + return nil + } + + nc := new(CSIInfo) + *nc = *c + nc.ControllerInfo = c.ControllerInfo.Copy() + nc.NodeInfo = c.NodeInfo.Copy() + + return nc +} + +func (c *CSIInfo) SetHealthy(hs bool) { + c.Healthy = hs + if hs { + c.HealthDescription = "healthy" + } else { + c.HealthDescription = "unhealthy" + } +} + +func (c *CSIInfo) Equal(o *CSIInfo) bool { + if c == nil && o == nil { + return c == o + } + + nc := *c + nc.UpdateTime = time.Time{} + no := *o + no.UpdateTime = time.Time{} + + return reflect.DeepEqual(nc, no) +} + // DriverInfo is the current state of a single driver. This is updated // regularly as driver health changes on the node. type DriverInfo struct { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 28942c1b7..ee475bccb 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -1659,6 +1659,11 @@ type Node struct { // Drivers is a map of driver names to current driver information Drivers map[string]*DriverInfo + // CSIControllerPlugins is a map of plugin names to current CSI Plugin info + CSIControllerPlugins map[string]*CSIInfo + // CSINodePlugins is a map of plugin names to current CSI Plugin info + CSINodePlugins map[string]*CSIInfo + // HostVolumes is a map of host volume names to their configuration HostVolumes map[string]*ClientHostVolumeConfig @@ -1705,6 +1710,8 @@ func (n *Node) Copy() *Node { nn.Meta = helper.CopyMapStringString(nn.Meta) nn.Events = copyNodeEvents(n.Events) nn.DrainStrategy = nn.DrainStrategy.Copy() + nn.CSIControllerPlugins = copyNodeCSI(nn.CSIControllerPlugins) + nn.CSINodePlugins = copyNodeCSI(nn.CSINodePlugins) nn.Drivers = copyNodeDrivers(n.Drivers) nn.HostVolumes = copyNodeHostVolumes(n.HostVolumes) return nn @@ -1724,6 +1731,21 @@ func copyNodeEvents(events []*NodeEvent) []*NodeEvent { return c } +// copyNodeCSI is a helper to copy a map of CSIInfo +func copyNodeCSI(plugins map[string]*CSIInfo) map[string]*CSIInfo { + l := len(plugins) + if l == 0 { + return nil + } + + c := make(map[string]*CSIInfo, l) + for plugin, info := range plugins { + c[plugin] = info.Copy() + } + + return c +} + // copyNodeDrivers is a helper to copy a map of DriverInfo func copyNodeDrivers(drivers map[string]*DriverInfo) map[string]*DriverInfo { l := len(drivers) @@ -5556,6 +5578,9 @@ type Task struct { // Used internally to manage tasks according to their TaskKind. 
Initial use case // is for Consul Connect Kind TaskKind + + // CSIPluginConfig is used to configure the plugin supervisor for the task. + CSIPluginConfig *TaskCSIPluginConfig } // UsesConnect is for conveniently detecting if the Task is able to make use @@ -5593,6 +5618,7 @@ func (t *Task) Copy() *Task { nt.Constraints = CopySliceConstraints(nt.Constraints) nt.Affinities = CopySliceAffinities(nt.Affinities) nt.VolumeMounts = CopySliceVolumeMount(nt.VolumeMounts) + nt.CSIPluginConfig = nt.CSIPluginConfig.Copy() nt.Vault = nt.Vault.Copy() nt.Resources = nt.Resources.Copy() @@ -5811,6 +5837,19 @@ func (t *Task) Validate(ephemeralDisk *EphemeralDisk, jobType string, tgServices } } + // Validate CSI Plugin Config + if t.CSIPluginConfig != nil { + if t.CSIPluginConfig.ID == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig must have a non-empty PluginID")) + } + + if !CSIPluginTypeIsValid(t.CSIPluginConfig.Type) { + mErr.Errors = append(mErr.Errors, fmt.Errorf("CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"%s\"", t.CSIPluginConfig.Type)) + } + + // TODO: Investigate validation of the PluginMountDir. Not much we can do apart from check IsAbs until after we understand its execution environment though :( + } + return mErr.ErrorOrNil() } @@ -6336,6 +6375,12 @@ const ( // TaskRestoreFailed indicates Nomad was unable to reattach to a // restored task. TaskRestoreFailed = "Failed Restoring Task" + + // TaskPluginUnhealthy indicates that a plugin managed by Nomad became unhealthy + TaskPluginUnhealthy = "Plugin became unhealthy" + + // TaskPluginHealthy indicates that a plugin managed by Nomad became healthy + TaskPluginHealthy = "Plugin became healthy" ) // TaskEvent is an event that effects the state of a task and contains meta-data diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index f43ebf527..cba53774d 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -1781,6 +1781,55 @@ func TestTask_Validate_LogConfig(t *testing.T) { } } +func TestTask_Validate_CSIPluginConfig(t *testing.T) { + table := []struct { + name string + pc *TaskCSIPluginConfig + expectedErr string + }{ + { + name: "no errors when not specified", + pc: nil, + }, + { + name: "requires non-empty plugin id", + pc: &TaskCSIPluginConfig{}, + expectedErr: "CSIPluginConfig must have a non-empty PluginID", + }, + { + name: "requires valid plugin type", + pc: &TaskCSIPluginConfig{ + ID: "com.hashicorp.csi", + Type: "nonsense", + }, + expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"", + }, + } + + for _, tt := range table { + t.Run(tt.name, func(t *testing.T) { + task := &Task{ + CSIPluginConfig: tt.pc, + } + ephemeralDisk := &EphemeralDisk{ + SizeMB: 1, + } + + err := task.Validate(ephemeralDisk, JobTypeService, nil) + mErr := err.(*multierror.Error) + if tt.expectedErr != "" { + if !strings.Contains(mErr.Errors[4].Error(), tt.expectedErr) { + t.Fatalf("err: %s", err) + } + } else { + if len(mErr.Errors) != 4 { + t.Fatalf("unexpected err: %s", mErr.Errors[4]) + } + } + }) + } +} + func TestTask_Validate_Template(t *testing.T) { bad := &Template{} diff --git a/plugins/csi/client.go b/plugins/csi/client.go new file mode 100644 index 000000000..5647eeba7 --- /dev/null +++ b/plugins/csi/client.go @@ -0,0 +1,210 @@ +package csi + +import ( + "context" + "fmt" + "net" + "time" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + 
"github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/shared/hclspec" + "google.golang.org/grpc" +) + +type NodeGetInfoResponse struct { + NodeID string + MaxVolumes int64 + AccessibleTopology *Topology +} + +// Topology is a map of topological domains to topological segments. +// A topological domain is a sub-division of a cluster, like "region", +// "zone", "rack", etc. +// +// According to CSI, there are a few requirements for the keys within this map: +// - Valid keys have two segments: an OPTIONAL prefix and name, separated +// by a slash (/), for example: "com.company.example/zone". +// - The key name segment is REQUIRED. The prefix is OPTIONAL. +// - The key name MUST be 63 characters or less, begin and end with an +// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-), +// underscores (_), dots (.), or alphanumerics in between, for example +// "zone". +// - The key prefix MUST be 63 characters or less, begin and end with a +// lower-case alphanumeric character ([a-z0-9]), contain only +// dashes (-), dots (.), or lower-case alphanumerics in between, and +// follow domain name notation format +// (https://tools.ietf.org/html/rfc1035#section-2.3.1). +// - The key prefix SHOULD include the plugin's host company name and/or +// the plugin name, to minimize the possibility of collisions with keys +// from other plugins. +// - If a key prefix is specified, it MUST be identical across all +// topology keys returned by the SP (across all RPCs). +// - Keys MUST be case-insensitive. Meaning the keys "Zone" and "zone" +// MUST not both exist. +// - Each value (topological segment) MUST contain 1 or more strings. +// - Each string MUST be 63 characters or less and begin and end with an +// alphanumeric character with '-', '_', '.', or alphanumerics in +// between. +type Topology struct { + Segments map[string]string +} + +type client struct { + conn *grpc.ClientConn + identityClient csipbv1.IdentityClient + controllerClient csipbv1.ControllerClient + nodeClient csipbv1.NodeClient +} + +func (c *client) Close() error { + if c.conn != nil { + return c.conn.Close() + } + return nil +} + +func NewClient(addr string) (CSIPlugin, error) { + if addr == "" { + return nil, fmt.Errorf("address is empty") + } + + conn, err := newGrpcConn(addr) + if err != nil { + return nil, err + } + + return &client{ + conn: conn, + identityClient: csipbv1.NewIdentityClient(conn), + controllerClient: csipbv1.NewControllerClient(conn), + nodeClient: csipbv1.NewNodeClient(conn), + }, nil +} + +func newGrpcConn(addr string) (*grpc.ClientConn, error) { + conn, err := grpc.Dial( + addr, + grpc.WithInsecure(), + grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { + return net.DialTimeout("unix", target, timeout) + }), + ) + + if err != nil { + return nil, fmt.Errorf("failed to open grpc connection to addr: %s, err: %v", addr, err) + } + + return conn, nil +} + +// PluginInfo describes the type and version of a plugin as required by the nomad +// base.BasePlugin interface. +func (c *client) PluginInfo() (*base.PluginInfoResponse, error) { + name, err := c.PluginGetInfo(context.TODO()) + if err != nil { + return nil, err + } + + return &base.PluginInfoResponse{ + Type: "csi", + PluginApiVersions: []string{"1.0.0"}, // TODO: fingerprint csi version + PluginVersion: "1.0.0", // TODO: get plugin version from somewhere?! 
+ Name: name, + }, nil +} + +// ConfigSchema returns the schema for parsing the plugins configuration as +// required by the base.BasePlugin interface. It will always return nil. +func (c *client) ConfigSchema() (*hclspec.Spec, error) { + return nil, nil +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. +func (c *client) SetConfig(_ *base.Config) error { + return fmt.Errorf("unsupported") +} + +func (c *client) PluginProbe(ctx context.Context) (bool, error) { + req, err := c.identityClient.Probe(ctx, &csipbv1.ProbeRequest{}) + if err != nil { + return false, err + } + + wrapper := req.GetReady() + + // wrapper.GetValue() protects against wrapper being `nil`, and returns false. + ready := wrapper.GetValue() + + if wrapper == nil { + // If the plugin returns a nil value for ready, then it should be + // interpreted as the plugin is ready for compatibility with plugins that + // do not do health checks. + ready = true + } + + return ready, nil +} + +func (c *client) PluginGetInfo(ctx context.Context) (string, error) { + if c == nil { + return "", fmt.Errorf("Client not initialized") + } + if c.identityClient == nil { + return "", fmt.Errorf("Client not initialized") + } + + req, err := c.identityClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{}) + if err != nil { + return "", err + } + + name := req.GetName() + if name == "" { + return "", fmt.Errorf("PluginGetInfo: plugin returned empty name field") + } + + return name, nil +} + +func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.identityClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + resp, err := c.identityClient.GetPluginCapabilities(ctx, &csipbv1.GetPluginCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewPluginCapabilitySet(resp), nil +} + +func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + result := &NodeGetInfoResponse{} + + resp, err := c.nodeClient.NodeGetInfo(ctx, &csipbv1.NodeGetInfoRequest{}) + if err != nil { + return nil, err + } + + if resp.GetNodeId() == "" { + return nil, fmt.Errorf("plugin failed to return nodeid") + } + + result.NodeID = resp.GetNodeId() + result.MaxVolumes = resp.GetMaxVolumesPerNode() + + return result, nil +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go new file mode 100644 index 000000000..882eacfad --- /dev/null +++ b/plugins/csi/client_test.go @@ -0,0 +1,191 @@ +package csi + +import ( + "context" + "fmt" + "testing" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/protobuf/ptypes/wrappers" + fake "github.com/hashicorp/nomad/plugins/csi/testing" + "github.com/stretchr/testify/require" +) + +func newTestClient() (*fake.IdentityClient, CSIPlugin) { + ic := &fake.IdentityClient{} + client := &client{ + identityClient: ic, + } + + return ic, client +} + +func TestClient_RPC_PluginProbe(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + ProbeResponse *csipbv1.ProbeResponse + ExpectedResponse bool + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns false for ready when the 
provider returns false", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: false}, + }, + ExpectedResponse: false, + }, + { + Name: "returns true for ready when the provider returns true", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: &wrappers.BoolValue{Value: true}, + }, + ExpectedResponse: true, + }, + { + /* When a SP does not return a ready value, a CO MAY treat this as ready. + We do so because example plugins rely on this behaviour. We may + re-evaluate this decision in the future. */ + Name: "returns true for ready when the provider returns a nil wrapper", + ProbeResponse: &csipbv1.ProbeResponse{ + Ready: nil, + }, + ExpectedResponse: true, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, client := newTestClient() + defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginProbe = c.ProbeResponse + + resp, err := client.PluginProbe(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } + +} + +func TestClient_RPC_PluginInfo(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + InfoResponse *csipbv1.GetPluginInfoResponse + ExpectedResponse string + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "returns an error if we receive an empty `name`", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "", + }, + ExpectedErr: fmt.Errorf("PluginGetInfo: plugin returned empty name field"), + }, + { + Name: "returns the name when successfully retrieved and not empty", + InfoResponse: &csipbv1.GetPluginInfoResponse{ + Name: "com.hashicorp.storage", + }, + ExpectedResponse: "com.hashicorp.storage", + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, client := newTestClient() + defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginInfo = c.InfoResponse + + resp, err := client.PluginGetInfo(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } + +} + +func TestClient_RPC_PluginGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.GetPluginCapabilitiesResponse + ExpectedResponse *PluginCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "HasControllerService is true when it's part of the response", + Response: &csipbv1.GetPluginCapabilitiesResponse{ + Capabilities: []*csipbv1.PluginCapability{ + { + Type: &csipbv1.PluginCapability_Service_{ + Service: &csipbv1.PluginCapability_Service{ + Type: csipbv1.PluginCapability_Service_CONTROLLER_SERVICE, + }, + }, + }, + }, + }, + ExpectedResponse: &PluginCapabilitySet{hasControllerService: true}, + }, + { + Name: "HasTopologies is true when it's part of the response", + Response: &csipbv1.GetPluginCapabilitiesResponse{ + Capabilities: []*csipbv1.PluginCapability{ + { + Type: &csipbv1.PluginCapability_Service_{ + Service: &csipbv1.PluginCapability_Service{ + Type: csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS, + }, + }, + }, + }, + }, + ExpectedResponse: &PluginCapabilitySet{hasTopologies: true}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + ic, client := newTestClient() + 
defer client.Close() + + ic.NextErr = c.ResponseErr + ic.NextPluginCapabilities = c.Response + + resp, err := client.PluginGetCapabilities(context.TODO()) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } + +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go new file mode 100644 index 000000000..dc8477363 --- /dev/null +++ b/plugins/csi/fake/client.go @@ -0,0 +1,112 @@ +// fake is a package that includes fake implementations of public interfaces +// from the CSI package for testing. +package fake + +import ( + "context" + "errors" + "sync" + + "github.com/hashicorp/nomad/plugins/base" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/shared/hclspec" +) + +var _ csi.CSIPlugin = &Client{} + +// Client is a mock implementation of the csi.CSIPlugin interface for use in testing +// external components +type Client struct { + Mu sync.RWMutex + + NextPluginInfoResponse *base.PluginInfoResponse + NextPluginInfoErr error + PluginInfoCallCount int64 + + NextPluginProbeResponse bool + NextPluginProbeErr error + PluginProbeCallCount int64 + + NextPluginGetInfoResponse string + NextPluginGetInfoErr error + PluginGetInfoCallCount int64 + + NextPluginGetCapabilitiesResponse *csi.PluginCapabilitySet + NextPluginGetCapabilitiesErr error + PluginGetCapabilitiesCallCount int64 + + NextNodeGetInfoResponse *csi.NodeGetInfoResponse + NextNodeGetInfoErr error + NodeGetInfoCallCount int64 +} + +// PluginInfo describes the type and version of a plugin. +func (c *Client) PluginInfo() (*base.PluginInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginInfoCallCount++ + + return c.NextPluginInfoResponse, c.NextPluginInfoErr +} + +// ConfigSchema returns the schema for parsing the plugins configuration. +func (c *Client) ConfigSchema() (*hclspec.Spec, error) { + return nil, errors.New("Unsupported") +} + +// SetConfig is used to set the configuration by passing a MessagePack +// encoding of it. +func (c *Client) SetConfig(a *base.Config) error { + return errors.New("Unsupported") +} + +// PluginProbe is used to verify that the plugin is in a healthy state +func (c *Client) PluginProbe(ctx context.Context) (bool, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginProbeCallCount++ + + return c.NextPluginProbeResponse, c.NextPluginProbeErr +} + +// PluginGetInfo is used to return semantic data about the plugin. +// Response: +// - string: name, the name of the plugin in domain notation format. +func (c *Client) PluginGetInfo(ctx context.Context) (string, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetInfoCallCount++ + + return c.NextPluginGetInfoResponse, c.NextPluginGetInfoErr +} + +// PluginGetCapabilities is used to return the available capabilities from the +// identity service. This currently only looks for the CONTROLLER_SERVICE and +// Accessible Topology Support +func (c *Client) PluginGetCapabilities(ctx context.Context) (*csi.PluginCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.PluginGetCapabilitiesCallCount++ + + return c.NextPluginGetCapabilitiesResponse, c.NextPluginGetCapabilitiesErr +} + +// NodeGetInfo is used to return semantic data about the current node in +// respect to the SP. 
+func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeGetInfoCallCount++ + + return c.NextNodeGetInfoResponse, c.NextNodeGetInfoErr +} + +// Shutdown the client and ensure any connections are cleaned up. +func (c *Client) Close() error { + return nil +} diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go new file mode 100644 index 000000000..646c0b5f9 --- /dev/null +++ b/plugins/csi/plugin.go @@ -0,0 +1,85 @@ +package csi + +import ( + "context" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/nomad/plugins/base" +) + +// CSIPlugin implements a lightweight abstraction layer around a CSI Plugin. +// It validates that responses from storage providers (SP's), correctly conform +// to the specification before returning response data or erroring. +type CSIPlugin interface { + base.BasePlugin + + // PluginProbe is used to verify that the plugin is in a healthy state + PluginProbe(ctx context.Context) (bool, error) + + // PluginGetInfo is used to return semantic data about the plugin. + // Response: + // - string: name, the name of the plugin in domain notation format. + PluginGetInfo(ctx context.Context) (string, error) + + // PluginGetCapabilities is used to return the available capabilities from the + // identity service. This currently only looks for the CONTROLLER_SERVICE and + // Accessible Topology Support + PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) + + // NodeGetInfo is used to return semantic data about the current node in + // respect to the SP. + NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) + + // Shutdown the client and ensure any connections are cleaned up. + Close() error +} + +type PluginCapabilitySet struct { + hasControllerService bool + hasTopologies bool +} + +func (p *PluginCapabilitySet) HasControllerService() bool { + return p.hasControllerService +} + +// HasTopologies indicates whether the volumes for this plugin are equally +// accessible by all nodes in the cluster. +// If true, we MUST use the topology information when scheduling workloads. 
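+// The flag is derived from the plugin's VOLUME_ACCESSIBILITY_CONSTRAINTS
+// service capability (see NewPluginCapabilitySet below).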
+func (p *PluginCapabilitySet) HasToplogies() bool { + return p.hasTopologies +} + +func (p *PluginCapabilitySet) IsEqual(o *PluginCapabilitySet) bool { + return p.hasControllerService == o.hasControllerService && p.hasTopologies == o.hasTopologies +} + +func NewTestPluginCapabilitySet(topologies, controller bool) *PluginCapabilitySet { + return &PluginCapabilitySet{ + hasTopologies: topologies, + hasControllerService: controller, + } +} + +func NewPluginCapabilitySet(capabilities *csipbv1.GetPluginCapabilitiesResponse) *PluginCapabilitySet { + cs := &PluginCapabilitySet{} + + pluginCapabilities := capabilities.GetCapabilities() + + for _, pcap := range pluginCapabilities { + if svcCap := pcap.GetService(); svcCap != nil { + switch svcCap.Type { + case csipbv1.PluginCapability_Service_UNKNOWN: + continue + case csipbv1.PluginCapability_Service_CONTROLLER_SERVICE: + cs.hasControllerService = true + case csipbv1.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS: + cs.hasTopologies = true + default: + continue + } + } + } + + return cs +} diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go new file mode 100644 index 000000000..a84841be4 --- /dev/null +++ b/plugins/csi/testing/client.go @@ -0,0 +1,43 @@ +package testing + +import ( + "context" + + csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" +) + +// IdentityClient is a CSI identity client used for testing +type IdentityClient struct { + NextErr error + NextPluginInfo *csipbv1.GetPluginInfoResponse + NextPluginCapabilities *csipbv1.GetPluginCapabilitiesResponse + NextPluginProbe *csipbv1.ProbeResponse +} + +// NewIdentityClient returns a new IdentityClient +func NewIdentityClient() *IdentityClient { + return &IdentityClient{} +} + +func (f *IdentityClient) Reset() { + f.NextErr = nil + f.NextPluginInfo = nil + f.NextPluginCapabilities = nil + f.NextPluginProbe = nil +} + +// GetPluginInfo returns plugin info +func (f *IdentityClient) GetPluginInfo(ctx context.Context, in *csipbv1.GetPluginInfoRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginInfoResponse, error) { + return f.NextPluginInfo, f.NextErr +} + +// GetPluginCapabilities implements csi method +func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipbv1.GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.GetPluginCapabilitiesResponse, error) { + return f.NextPluginCapabilities, f.NextErr +} + +// Probe implements csi method +func (f *IdentityClient) Probe(ctx context.Context, in *csipbv1.ProbeRequest, opts ...grpc.CallOption) (*csipbv1.ProbeResponse, error) { + return f.NextPluginProbe, f.NextErr +} From 72ee2d4c1cfae51b2a25ee395ef28cce2e55fee7 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 16 Dec 2019 13:31:09 +0100 Subject: [PATCH 005/126] csi: Add initial plumbing for controller rpcs --- plugins/csi/client.go | 45 ++++++++++++++++++++++- plugins/csi/client_test.go | 68 +++++++++++++++++++++++++++++++---- plugins/csi/fake/client.go | 14 ++++++++ plugins/csi/plugin.go | 15 ++++++++ plugins/csi/testing/client.go | 36 +++++++++++++++++++ 5 files changed, 170 insertions(+), 8 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 5647eeba7..83a2c7c30 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -7,6 +7,7 @@ import ( "time" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/shared/hclspec" 
"google.golang.org/grpc" @@ -50,10 +51,19 @@ type Topology struct { Segments map[string]string } +// CSIControllerClient defines the minimal CSI Controller Plugin interface used +// by nomad to simplify the interface required for testing. +type CSIControllerClient interface { + ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) + ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) +} + type client struct { conn *grpc.ClientConn identityClient csipbv1.IdentityClient - controllerClient csipbv1.ControllerClient + controllerClient CSIControllerClient nodeClient csipbv1.NodeClient } @@ -184,6 +194,39 @@ func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySe return NewPluginCapabilitySet(resp), nil } +// +// Controller Endpoints +// + +func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + + pbrequest := &csipbv1.ControllerPublishVolumeRequest{ + VolumeId: req.VolumeID, + NodeId: req.NodeID, + Readonly: req.ReadOnly, + //TODO: add capabilities + } + + resp, err := c.controllerClient.ControllerPublishVolume(ctx, pbrequest) + if err != nil { + return nil, err + } + + return &ControllerPublishVolumeResponse{ + PublishContext: helper.CopyMapStringString(resp.PublishContext), + }, nil +} + +// +// Node Endpoints +// + func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) { if c == nil { return nil, fmt.Errorf("Client not initialized") diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 882eacfad..5832aeb5e 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -11,13 +11,15 @@ import ( "github.com/stretchr/testify/require" ) -func newTestClient() (*fake.IdentityClient, CSIPlugin) { +func newTestClient() (*fake.IdentityClient, *fake.ControllerClient, CSIPlugin) { ic := &fake.IdentityClient{} + cc := &fake.ControllerClient{} client := &client{ - identityClient: ic, + identityClient: ic, + controllerClient: cc, } - return ic, client + return ic, cc, client } func TestClient_RPC_PluginProbe(t *testing.T) { @@ -61,7 +63,7 @@ func TestClient_RPC_PluginProbe(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, client := newTestClient() + ic, _, client := newTestClient() defer client.Close() ic.NextErr = c.ResponseErr @@ -109,7 +111,7 @@ func TestClient_RPC_PluginInfo(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, client := newTestClient() + ic, _, client := newTestClient() defer client.Close() ic.NextErr = c.ResponseErr @@ -173,7 +175,7 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, client := newTestClient() + ic, _, client := newTestClient() defer 
client.Close() ic.NextErr = c.ResponseErr @@ -187,5 +189,57 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { require.Equal(t, c.ExpectedResponse, resp) }) } - +} + +func TestClient_RPC_ControllerPublishVolume(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.ControllerPublishVolumeResponse + ExpectedResponse *ControllerPublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "Handles PublishContext == nil", + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedResponse: &ControllerPublishVolumeResponse{}, + }, + { + Name: "Handles PublishContext != nil", + Response: &csipbv1.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": "foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + ExpectedResponse: &ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "com.hashicorp/nomad-node-id": "foobar", + "com.plugin/device": "/dev/sdc1", + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, cc, client := newTestClient() + defer client.Close() + + cc.NextErr = c.ResponseErr + cc.NextPublishVolumeResponse = c.Response + + resp, err := client.ControllerPublishVolume(context.TODO(), &ControllerPublishVolumeRequest{}) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index dc8477363..f3d218ff8 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -35,6 +35,10 @@ type Client struct { NextPluginGetCapabilitiesErr error PluginGetCapabilitiesCallCount int64 + NextControllerPublishVolumeResponse *csi.ControllerPublishVolumeResponse + NextControllerPublishVolumeErr error + ControllerPublishVolumeCallCount int64 + NextNodeGetInfoResponse *csi.NodeGetInfoResponse NextNodeGetInfoErr error NodeGetInfoCallCount int64 @@ -95,6 +99,16 @@ func (c *Client) PluginGetCapabilities(ctx context.Context) (*csi.PluginCapabili return c.NextPluginGetCapabilitiesResponse, c.NextPluginGetCapabilitiesErr } +// ControllerPublishVolume is used to attach a remote volume to a node +func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerPublishVolumeCallCount++ + + return c.NextControllerPublishVolumeResponse, c.NextControllerPublishVolumeErr +} + // NodeGetInfo is used to return semantic data about the current node in // respect to the SP. func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, error) { diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 646c0b5f9..012313670 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -26,6 +26,9 @@ type CSIPlugin interface { // Accessible Topology Support PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) + // ControllerPublishVolume is used to attach a remote volume to a cluster node. + ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + // NodeGetInfo is used to return semantic data about the current node in // respect to the SP. 
NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) @@ -83,3 +86,15 @@ func NewPluginCapabilitySet(capabilities *csipbv1.GetPluginCapabilitiesResponse) return cs } + +type ControllerPublishVolumeRequest struct { + VolumeID string + NodeID string + ReadOnly bool + + //TODO: Add Capabilities +} + +type ControllerPublishVolumeResponse struct { + PublishContext map[string]string +} diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index a84841be4..95739de46 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -41,3 +41,39 @@ func (f *IdentityClient) GetPluginCapabilities(ctx context.Context, in *csipbv1. func (f *IdentityClient) Probe(ctx context.Context, in *csipbv1.ProbeRequest, opts ...grpc.CallOption) (*csipbv1.ProbeResponse, error) { return f.NextPluginProbe, f.NextErr } + +// ControllerClient is a CSI controller client used for testing +type ControllerClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.ControllerGetCapabilitiesResponse + NextPublishVolumeResponse *csipbv1.ControllerPublishVolumeResponse + NextUnpublishVolumeResponse *csipbv1.ControllerUnpublishVolumeResponse +} + +// NewControllerClient returns a new ControllerClient +func NewControllerClient() *ControllerClient { + return &ControllerClient{} +} + +func (f *ControllerClient) Reset() { + f.NextErr = nil + f.NextCapabilitiesResponse = nil + f.NextPublishVolumeResponse = nil + f.NextUnpublishVolumeResponse = nil +} + +func (c *ControllerClient) ControllerGetCapabilities(ctx context.Context, in *csipbv1.ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ControllerGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *ControllerClient) ControllerPublishVolume(ctx context.Context, in *csipbv1.ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerPublishVolumeResponse, error) { + return c.NextPublishVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *csipbv1.ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.ControllerUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, c.NextErr +} + +func (c *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) { + panic("not implemented") // TODO: Implement +} From 2c29b1c53dbd5ddca8c3cf5d3b0b64c61a51209e Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 16 Dec 2019 14:19:59 +0100 Subject: [PATCH 006/126] client: Setup CSI RPC Endpoint This commit introduces a new set of endpoints to a Nomad Client: ClientCSI. ClientCSI is responsible for mediating requests from a Nomad Server to a CSI Plugin running on a Nomad Client. It should only really be used to make controller RPCs. 
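
For illustration, a minimal sketch of how the new endpoint can be exercised
through the client RPC layer. It mirrors the endpoint test added in this
commit; the plugin name and volume/node IDs are placeholder values, and the
call is expected to fail here because no such plugin has been registered.

package client

import (
	"testing"

	"github.com/hashicorp/nomad/client/structs"
	"github.com/stretchr/testify/require"
)

func TestSketch_CSIControllerPublishVolume(t *testing.T) {
	// TestClient builds a Nomad client suitable for tests, as in the
	// endpoint test added by this commit.
	c, cleanup := TestClient(t, nil)
	defer cleanup()

	// Placeholder request values; a real caller names a registered
	// csi-controller plugin and the volume/node pair to attach.
	req := &structs.ClientCSIControllerPublishVolumeRequest{
		PluginName: "example-plugin",
		VolumeID:   "example-volume-id",
		NodeID:     "example-node-id",
	}

	var resp structs.ClientCSIControllerPublishVolumeResponse

	// The endpoint validates the request and then forwards it to the
	// plugin's ControllerPublishVolume RPC.
	err := c.ClientRPC("ClientCSI.CSIControllerPublishVolume", req, &resp)

	// No plugin named "example-plugin" is registered in this sketch, so an
	// error is expected.
	require.Error(t, err)
}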
--- client/client_csi_endpoint.go | 81 +++++++++++++++++++ client/client_csi_endpoint_test.go | 121 +++++++++++++++++++++++++++++ client/dynamicplugins/registry.go | 40 +++++++++- client/rpc.go | 3 + client/structs/csi.go | 58 ++++++++++++++ 5 files changed, 300 insertions(+), 3 deletions(-) create mode 100644 client/client_csi_endpoint.go create mode 100644 client/client_csi_endpoint_test.go create mode 100644 client/structs/csi.go diff --git a/client/client_csi_endpoint.go b/client/client_csi_endpoint.go new file mode 100644 index 000000000..73c67ab9d --- /dev/null +++ b/client/client_csi_endpoint.go @@ -0,0 +1,81 @@ +package client + +import ( + "context" + "errors" + "time" + + metrics "github.com/armon/go-metrics" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +// ClientCSI endpoint is used for interacting with CSI plugins on a client. +// TODO: Submit metrics with labels to allow debugging per plugin perf problems. +type ClientCSI struct { + c *Client +} + +const ( + // CSIPluginRequestTimeout is the timeout that should be used when making reqs + // against CSI Plugins. It is copied from Kubernetes as an initial seed value. + // https://github.com/kubernetes/kubernetes/blob/e680ad7156f263a6d8129cc0117fda58602e50ad/pkg/volume/csi/csi_plugin.go#L52 + CSIPluginRequestTimeout = 2 * time.Minute +) + +var ( + ErrPluginTypeError = errors.New("CSI Plugin loaded incorrectly") +) + +// CSIControllerPublishVolume is used to attach a volume from a CSI Cluster to +// the storage node provided in the request. +func (c *ClientCSI) CSIControllerPublishVolume(req *structs.ClientCSIControllerPublishVolumeRequest, resp *structs.ClientCSIControllerPublishVolumeResponse) error { + defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) + client, err := c.findControllerPlugin(req.PluginName) + if err != nil { + return err + } + defer client.Close() + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.NodeID == "" { + return errors.New("NodeID is required") + } + + ctx, cancelFn := c.requestContext() + defer cancelFn() + cresp, err := client.ControllerPublishVolume(ctx, req.ToCSIRequest()) + if err != nil { + return err + } + + resp.PublishContext = cresp.PublishContext + return nil +} + +func (c *ClientCSI) findControllerPlugin(name string) (csi.CSIPlugin, error) { + return c.findPlugin(dynamicplugins.PluginTypeCSIController, name) +} + +// TODO: Cache Plugin Clients? 
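+// findPlugin dispenses a plugin client of the given type and name from the
+// client's dynamic plugin registry and asserts that it implements
+// csi.CSIPlugin, returning ErrPluginTypeError when the dispensed instance
+// does not.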
+func (c *ClientCSI) findPlugin(ptype, name string) (csi.CSIPlugin, error) { + pIface, err := c.c.dynamicRegistry.DispensePlugin(ptype, name) + if err != nil { + return nil, err + } + + plugin, ok := pIface.(csi.CSIPlugin) + if !ok { + return nil, ErrPluginTypeError + } + + return plugin, nil +} + +func (c *ClientCSI) requestContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), CSIPluginRequestTimeout) +} diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go new file mode 100644 index 000000000..f349bbe0e --- /dev/null +++ b/client/client_csi_endpoint_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "errors" + "testing" + + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" +) + +var fakePlugin = &dynamicplugins.PluginInfo{ + Name: "test-plugin", + Type: "csi-controller", + ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, +} + +func TestClientCSI_CSIControllerPublishVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerPublishVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerPublishVolumeResponse + }{ + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: "some-garbage", + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), + }, + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: fakePlugin.Name, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "validates nodeid is not empty", + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + }, + ExpectedErr: errors.New("NodeID is required"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + }, + ExpectedErr: errors.New("hello"), + }, + { + Name: "handles nil PublishContext", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{} + }, + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + }, + ExpectedResponse: &structs.ClientCSIControllerPublishVolumeResponse{}, + }, + { + Name: "handles non-nil PublishContext", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{"foo": "bar"}, + } + }, + Request: &structs.ClientCSIControllerPublishVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + }, + ExpectedResponse: &structs.ClientCSIControllerPublishVolumeResponse{ + PublishContext: map[string]string{"foo": "bar"}, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { 
+ tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerPublishVolumeResponse + err = client.ClientRPC("ClientCSI.CSIControllerPublishVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go index b1aa06130..ea7bebc26 100644 --- a/client/dynamicplugins/registry.go +++ b/client/dynamicplugins/registry.go @@ -27,6 +27,8 @@ type Registry interface { PluginsUpdatedCh(ctx context.Context, ptype string) <-chan *PluginUpdateEvent Shutdown() + + StubDispenserForType(ptype string, dispenser PluginDispenser) } type PluginDispenser func(info *PluginInfo) (interface{}, error) @@ -87,7 +89,30 @@ type dynamicRegistry struct { broadcasters map[string]*pluginEventBroadcaster broadcastersLock sync.Mutex - dispensers map[string]PluginDispenser + dispensers map[string]PluginDispenser + stubDispensers map[string]PluginDispenser +} + +// StubDispenserForType allows test functions to provide alternative plugin +// dispensers to simplify writing tests for higher level Nomad features. +// This function should not be called from production code. +func (d *dynamicRegistry) StubDispenserForType(ptype string, dispenser PluginDispenser) { + // delete from stubs + if dispenser == nil && d.stubDispensers != nil { + delete(d.stubDispensers, ptype) + if len(d.stubDispensers) == 0 { + d.stubDispensers = nil + } + + return + } + + // setup stubs + if d.stubDispensers == nil { + d.stubDispensers = make(map[string]PluginDispenser, 1) + } + + d.stubDispensers[ptype] = dispenser } func (d *dynamicRegistry) RegisterPlugin(info *PluginInfo) error { @@ -206,12 +231,12 @@ func (d *dynamicRegistry) DispensePlugin(ptype string, name string) (interface{} if ptype == "" { // This error shouldn't make it to a production cluster and is to aid // developers during the development of new plugin types. - return nil, errors.New("must specify plugin type to deregister") + return nil, errors.New("must specify plugin type to dispense") } if name == "" { // This error shouldn't make it to a production cluster and is to aid // developers during the development of new plugin types. - return nil, errors.New("must specify plugin name to deregister") + return nil, errors.New("must specify plugin name to dispense") } dispenseFunc, ok := d.dispensers[ptype] @@ -221,6 +246,15 @@ func (d *dynamicRegistry) DispensePlugin(ptype string, name string) (interface{} return nil, fmt.Errorf("no plugin dispenser found for type: %s", ptype) } + // After initially loading the dispenser (to avoid masking missing setup in + // client/client.go), we then check to see if we have any stub dispensers for + // this plugin type. If we do, then replace the dispenser fn with the stub. 
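+	// Note that the plugin must still be registered under this type and name;
+	// the stub only replaces the dispenser used to construct the plugin client.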
+ if d.stubDispensers != nil { + if stub, ok := d.stubDispensers[ptype]; ok { + dispenseFunc = stub + } + } + pmap, ok := d.plugins[ptype] if !ok { return nil, fmt.Errorf("no plugins registered for type: %s", ptype) diff --git a/client/rpc.go b/client/rpc.go index b502587cf..31589e3e8 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -21,6 +21,7 @@ import ( // rpcEndpoints holds the RPC endpoints type rpcEndpoints struct { ClientStats *ClientStats + ClientCSI *ClientCSI FileSystem *FileSystem Allocations *Allocations Agent *Agent @@ -217,6 +218,7 @@ func (c *Client) streamingRpcConn(server *servers.Server, method string) (net.Co func (c *Client) setupClientRpc() { // Initialize the RPC handlers c.endpoints.ClientStats = &ClientStats{c} + c.endpoints.ClientCSI = &ClientCSI{c} c.endpoints.FileSystem = NewFileSystemEndpoint(c) c.endpoints.Allocations = NewAllocationsEndpoint(c) c.endpoints.Agent = NewAgentEndpoint(c) @@ -234,6 +236,7 @@ func (c *Client) setupClientRpc() { func (c *Client) setupClientRpcServer(server *rpc.Server) { // Register the endpoints server.Register(c.endpoints.ClientStats) + server.Register(c.endpoints.ClientCSI) server.Register(c.endpoints.FileSystem) server.Register(c.endpoints.Allocations) server.Register(c.endpoints.Agent) diff --git a/client/structs/csi.go b/client/structs/csi.go new file mode 100644 index 000000000..f262a3fdf --- /dev/null +++ b/client/structs/csi.go @@ -0,0 +1,58 @@ +package structs + +import "github.com/hashicorp/nomad/plugins/csi" + +type ClientCSIControllerPublishVolumeRequest struct { + PluginName string + + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeID string + + // The ID of the node. This field is REQUIRED. This must match the NodeID that + // is fingerprinted by the target node for this plugin name. + NodeID string + + // TODO: Fingerprint the following correctly: + + // Volume capability describing how the CO intends to use this volume. + // SP MUST ensure the CO can use the published volume as described. + // Otherwise SP MUST return the appropriate gRPC error code. + // This is a REQUIRED field. + // VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + + // Indicates SP MUST publish the volume in readonly mode. + // CO MUST set this field to false if SP does not have the + // PUBLISH_READONLY controller capability. + // This is a REQUIRED field. + ReadOnly bool +} + +func (c *ClientCSIControllerPublishVolumeRequest) ToCSIRequest() *csi.ControllerPublishVolumeRequest { + if c == nil { + return &csi.ControllerPublishVolumeRequest{} + } + + return &csi.ControllerPublishVolumeRequest{ + VolumeID: c.VolumeID, + NodeID: c.NodeID, + ReadOnly: c.ReadOnly, + } +} + +type ClientCSIControllerPublishVolumeResponse struct { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to nomad. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the nomad to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. 
+ // This field is OPTIONAL and when present MUST be passed to + // subsequent `NodeStageVolume` or `NodePublishVolume` calls + PublishContext map[string]string +} From 259852b05fe26c196d6896672272fe0687e04ebd Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 17 Dec 2019 12:09:57 +0100 Subject: [PATCH 007/126] csi: Model Attachment and Access modes --- client/structs/csi.go | 83 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 73 insertions(+), 10 deletions(-) diff --git a/client/structs/csi.go b/client/structs/csi.go index f262a3fdf..d383ccb30 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -2,6 +2,68 @@ package structs import "github.com/hashicorp/nomad/plugins/csi" +// CSIVolumeAttachmentMode chooses the type of storage api that will be used to +// interact with the device. +type CSIVolumeAttachmentMode string + +const ( + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +) + +func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool { + switch attachmentMode { + case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem: + return true + default: + return false + } +} + +// CSIVolumeAccessMode indicates how a volume should be used in a storage topology +// e.g whether the provider should make the volume available concurrently. +type CSIVolumeAccessMode string + +const ( + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" +) + +// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid, +// non-empty access mode. +func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { + switch accessMode { + case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter, + CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter, + CSIVolumeAccessModeMultiNodeMultiWriter: + return true + default: + return false + } +} + +// CSIVolumeMountOptions contains the mount options that should be provided when +// attaching and mounting a volume with the CSIVolumeAttachmentModeFilesystem +// attachment mode. +type CSIVolumeMountOptions struct { + // Filesystem is the desired filesystem type that should be used by the volume + // (e.g ext4, aufs, zfs). This field is optional. + Filesystem string + + // MountFlags contain the mount options that should be used for the volume. + // These may contain _sensitive_ data and should not be leaked to logs or + // returned in debugging data. + // The total size of this field must be under 4KiB. + MountFlags []string +} + type ClientCSIControllerPublishVolumeRequest struct { PluginName string @@ -13,18 +75,19 @@ type ClientCSIControllerPublishVolumeRequest struct { // is fingerprinted by the target node for this plugin name. NodeID string - // TODO: Fingerprint the following correctly: + // AttachmentMode indicates how the volume should be attached and mounted into + // a task. 
+ AttachmentMode CSIVolumeAttachmentMode - // Volume capability describing how the CO intends to use this volume. - // SP MUST ensure the CO can use the published volume as described. - // Otherwise SP MUST return the appropriate gRPC error code. - // This is a REQUIRED field. - // VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability,proto3" json:"volume_capability,omitempty"` + // AccessMode indicates the desired concurrent access model for the volume + AccessMode CSIVolumeAccessMode - // Indicates SP MUST publish the volume in readonly mode. - // CO MUST set this field to false if SP does not have the - // PUBLISH_READONLY controller capability. - // This is a REQUIRED field. + // MountOptions is an optional field that contains additional configuration + // when providing an AttachmentMode of CSIVolumeAttachmentModeFilesystem + MountOptions *CSIVolumeMountOptions + + // ReadOnly indicates that the volume will be used in a readonly fashion. This + // only works when the Controller has the PublishReadonly capability. ReadOnly bool } From 2fc65371a852b5dae5604fd544fe436fcb84d5ba Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 17 Dec 2019 12:37:33 +0100 Subject: [PATCH 008/126] csi: ClientCSIControllerPublish* -> ClientCSIControllerAttach* --- client/client_csi_endpoint.go | 10 ++++-- client/client_csi_endpoint_test.go | 55 +++++++++++++++++++++--------- client/structs/csi.go | 6 ++-- 3 files changed, 50 insertions(+), 21 deletions(-) diff --git a/client/client_csi_endpoint.go b/client/client_csi_endpoint.go index 73c67ab9d..dcc57fc69 100644 --- a/client/client_csi_endpoint.go +++ b/client/client_csi_endpoint.go @@ -28,9 +28,15 @@ var ( ErrPluginTypeError = errors.New("CSI Plugin loaded incorrectly") ) -// CSIControllerPublishVolume is used to attach a volume from a CSI Cluster to +// CSIControllerAttachVolume is used to attach a volume from a CSI Cluster to // the storage node provided in the request. -func (c *ClientCSI) CSIControllerPublishVolume(req *structs.ClientCSIControllerPublishVolumeRequest, resp *structs.ClientCSIControllerPublishVolumeResponse) error { +// +// The controller attachment flow currently works as follows: +// 1. Validate the volume request +// 2. Call ControllerPublishVolume on the CSI Plugin to trigger a remote attachment +// +// In the future this may be expanded to request dynamic secrets for attachement. 
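+//
+// For illustration, a hypothetical request (the field values are examples
+// only):
+//
+//	req := &structs.ClientCSIControllerAttachVolumeRequest{
+//		PluginName:     "example-plugin",
+//		VolumeID:       "example-volume-id",
+//		NodeID:         "example-node-id",
+//		AccessMode:     structs.CSIVolumeAccessModeSingleNodeWriter,
+//		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
+//	}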
+func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) client, err := c.findControllerPlugin(req.PluginName) if err != nil { diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go index f349bbe0e..8d07b88c6 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/client_csi_endpoint_test.go @@ -17,47 +17,70 @@ var fakePlugin = &dynamicplugins.PluginInfo{ ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, } -func TestClientCSI_CSIControllerPublishVolume(t *testing.T) { +func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { t.Parallel() cases := []struct { Name string ClientSetupFunc func(*fake.Client) - Request *structs.ClientCSIControllerPublishVolumeRequest + Request *structs.ClientCSIControllerAttachVolumeRequest ExpectedErr error - ExpectedResponse *structs.ClientCSIControllerPublishVolumeResponse + ExpectedResponse *structs.ClientCSIControllerAttachVolumeResponse }{ { Name: "returns plugin not found errors", - Request: &structs.ClientCSIControllerPublishVolumeRequest{ + Request: &structs.ClientCSIControllerAttachVolumeRequest{ PluginName: "some-garbage", }, ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), }, { Name: "validates volumeid is not empty", - Request: &structs.ClientCSIControllerPublishVolumeRequest{ + Request: &structs.ClientCSIControllerAttachVolumeRequest{ PluginName: fakePlugin.Name, }, ExpectedErr: errors.New("VolumeID is required"), }, { Name: "validates nodeid is not empty", - Request: &structs.ClientCSIControllerPublishVolumeRequest{ + Request: &structs.ClientCSIControllerAttachVolumeRequest{ PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", }, ExpectedErr: errors.New("NodeID is required"), }, + { + Name: "validates AccessMode", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + AccessMode: structs.CSIVolumeAccessMode("foo"), + }, + ExpectedErr: errors.New("Unknown access mode: foo"), + }, + { + Name: "validates attachmentmode is not empty", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: structs.CSIVolumeAttachmentMode("bar"), + }, + ExpectedErr: errors.New("Unknown attachment mode: bar"), + }, { Name: "returns transitive errors", ClientSetupFunc: func(fc *fake.Client) { fc.NextControllerPublishVolumeErr = errors.New("hello") }, - Request: &structs.ClientCSIControllerPublishVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", + Request: &structs.ClientCSIControllerAttachVolumeRequest{ + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }, ExpectedErr: errors.New("hello"), }, @@ -66,12 +89,12 @@ func TestClientCSI_CSIControllerPublishVolume(t *testing.T) { ClientSetupFunc: func(fc *fake.Client) { fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{} }, - Request: &structs.ClientCSIControllerPublishVolumeRequest{ + Request: &structs.ClientCSIControllerAttachVolumeRequest{ PluginName: 
fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", }, - ExpectedResponse: &structs.ClientCSIControllerPublishVolumeResponse{}, + ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, }, { Name: "handles non-nil PublishContext", @@ -80,12 +103,12 @@ func TestClientCSI_CSIControllerPublishVolume(t *testing.T) { PublishContext: map[string]string{"foo": "bar"}, } }, - Request: &structs.ClientCSIControllerPublishVolumeRequest{ + Request: &structs.ClientCSIControllerAttachVolumeRequest{ PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", }, - ExpectedResponse: &structs.ClientCSIControllerPublishVolumeResponse{ + ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ PublishContext: map[string]string{"foo": "bar"}, }, }, @@ -110,8 +133,8 @@ func TestClientCSI_CSIControllerPublishVolume(t *testing.T) { err := client.dynamicRegistry.RegisterPlugin(fakePlugin) require.Nil(err) - var resp structs.ClientCSIControllerPublishVolumeResponse - err = client.ClientRPC("ClientCSI.CSIControllerPublishVolume", tc.Request, &resp) + var resp structs.ClientCSIControllerAttachVolumeResponse + err = client.ClientRPC("ClientCSI.CSIControllerAttachVolume", tc.Request, &resp) require.Equal(tc.ExpectedErr, err) if tc.ExpectedResponse != nil { require.Equal(tc.ExpectedResponse, &resp) diff --git a/client/structs/csi.go b/client/structs/csi.go index d383ccb30..0e37101c0 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -64,7 +64,7 @@ type CSIVolumeMountOptions struct { MountFlags []string } -type ClientCSIControllerPublishVolumeRequest struct { +type ClientCSIControllerAttachVolumeRequest struct { PluginName string // The ID of the volume to be used on a node. @@ -91,7 +91,7 @@ type ClientCSIControllerPublishVolumeRequest struct { ReadOnly bool } -func (c *ClientCSIControllerPublishVolumeRequest) ToCSIRequest() *csi.ControllerPublishVolumeRequest { +func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() *csi.ControllerPublishVolumeRequest { if c == nil { return &csi.ControllerPublishVolumeRequest{} } @@ -103,7 +103,7 @@ func (c *ClientCSIControllerPublishVolumeRequest) ToCSIRequest() *csi.Controller } } -type ClientCSIControllerPublishVolumeResponse struct { +type ClientCSIControllerAttachVolumeResponse struct { // Opaque static publish properties of the volume. SP MAY use this // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` // calls calls have contextual information. From 9a23e27439e823a36b3977f877c2baba373e6994 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 17 Dec 2019 12:38:07 +0100 Subject: [PATCH 009/126] client_csi: Validate Access/Attachment modes --- client/client_csi_endpoint.go | 21 ++++++++++++++++++--- client/client_csi_endpoint_test.go | 16 ++++++++++------ 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/client/client_csi_endpoint.go b/client/client_csi_endpoint.go index dcc57fc69..95a3ed893 100644 --- a/client/client_csi_endpoint.go +++ b/client/client_csi_endpoint.go @@ -3,6 +3,7 @@ package client import ( "context" "errors" + "fmt" "time" metrics "github.com/armon/go-metrics" @@ -38,11 +39,16 @@ var ( // In the future this may be expanded to request dynamic secrets for attachement. 
func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) - client, err := c.findControllerPlugin(req.PluginName) + plugin, err := c.findControllerPlugin(req.PluginName) if err != nil { return err } - defer client.Close() + defer plugin.Close() + + // The following block of validation checks should not be reached on a + // real Nomad cluster as all of this data should be validated when registering + // volumes with the cluster. They serve as a defensive check before forwarding + // requests to plugins, and to aid with development. if req.VolumeID == "" { return errors.New("VolumeID is required") @@ -52,9 +58,18 @@ func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAt return errors.New("NodeID is required") } + if !structs.ValidCSIVolumeAccessMode(req.AccessMode) { + return fmt.Errorf("Unknown access mode: %v", req.AccessMode) + } + + if !structs.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { + return fmt.Errorf("Unknown attachment mode: %v", req.AttachmentMode) + } + + // Submit the request for a volume to the CSI Plugin. ctx, cancelFn := c.requestContext() defer cancelFn() - cresp, err := client.ControllerPublishVolume(ctx, req.ToCSIRequest()) + cresp, err := plugin.ControllerPublishVolume(ctx, req.ToCSIRequest()) if err != nil { return err } diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go index 8d07b88c6..95e1fbf03 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/client_csi_endpoint_test.go @@ -90,9 +90,11 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{} }, Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, }, @@ -104,9 +106,11 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { } }, Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", + PluginName: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + NodeID: "abcde", + AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ PublishContext: map[string]string{"foo": "bar"}, From 05525c98ae9092ccb0b7aa0244e86e08cfdd29c5 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 18 Dec 2019 11:57:55 +0100 Subject: [PATCH 010/126] plugins_csi: Add GetControllerCapabilities RPC --- plugins/csi/client.go | 16 +++++++ plugins/csi/client_test.go | 98 ++++++++++++++++++++++++++++++++++++++ plugins/csi/fake/client.go | 13 +++++ plugins/csi/plugin.go | 35 ++++++++++++++ 4 files changed, 162 insertions(+) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 83a2c7c30..dd650d96b 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -198,6 +198,22 @@ func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySe // Controller Endpoints // +func 
(c *client) ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + + resp, err := c.controllerClient.ControllerGetCapabilities(ctx, &csipbv1.ControllerGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewControllerCapabilitySet(resp), nil +} + func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) { if c == nil { return nil, fmt.Errorf("Client not initialized") diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 5832aeb5e..84973b308 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -191,6 +191,104 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { } } +func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.ControllerGetCapabilitiesResponse + ExpectedResponse *ControllerCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "ignores unknown capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{}, + }, + { + Name: "detects list volumes capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{ + HasListVolumes: true, + HasListVolumesPublishedNodes: true, + }, + }, + { + Name: "detects publish capabilities", + Response: &csipbv1.ControllerGetCapabilitiesResponse{ + Capabilities: []*csipbv1.ControllerServiceCapability{ + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY, + }, + }, + }, + { + Type: &csipbv1.ControllerServiceCapability_Rpc{ + Rpc: &csipbv1.ControllerServiceCapability_RPC{ + Type: csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &ControllerCapabilitySet{ + HasPublishUnpublishVolume: true, + HasPublishReadonly: true, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, cc, client := newTestClient() + defer client.Close() + + cc.NextErr = tc.ResponseErr + cc.NextCapabilitiesResponse = tc.Response + + resp, err := client.ControllerGetCapabilities(context.TODO()) + if tc.ExpectedErr != nil { + require.Error(t, tc.ExpectedErr, err) + } + + require.Equal(t, tc.ExpectedResponse, resp) + }) + } +} + func TestClient_RPC_ControllerPublishVolume(t *testing.T) { cases := []struct { Name string diff --git 
a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index f3d218ff8..b2372906d 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -35,6 +35,10 @@ type Client struct { NextPluginGetCapabilitiesErr error PluginGetCapabilitiesCallCount int64 + NextControllerGetCapabilitiesResponse *csi.ControllerCapabilitySet + NextControllerGetCapabilitiesErr error + ControllerGetCapabilitiesCallCount int64 + NextControllerPublishVolumeResponse *csi.ControllerPublishVolumeResponse NextControllerPublishVolumeErr error ControllerPublishVolumeCallCount int64 @@ -99,6 +103,15 @@ func (c *Client) PluginGetCapabilities(ctx context.Context) (*csi.PluginCapabili return c.NextPluginGetCapabilitiesResponse, c.NextPluginGetCapabilitiesErr } +func (c *Client) ControllerGetCapabilities(ctx context.Context) (*csi.ControllerCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerGetCapabilitiesCallCount++ + + return c.NextControllerGetCapabilitiesResponse, c.NextControllerGetCapabilitiesErr +} + // ControllerPublishVolume is used to attach a remote volume to a node func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { c.Mu.Lock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 012313670..56a813a28 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -26,6 +26,10 @@ type CSIPlugin interface { // Accessible Topology Support PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) + // GetControllerCapabilities is used to get controller-specific capabilities + // for a plugin. + ControllerGetCapabilities(ctx context.Context) (*ControllerCapabilitySet, error) + // ControllerPublishVolume is used to attach a remote volume to a cluster node. 
ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) @@ -87,6 +91,37 @@ func NewPluginCapabilitySet(capabilities *csipbv1.GetPluginCapabilitiesResponse) return cs } +type ControllerCapabilitySet struct { + HasPublishUnpublishVolume bool + HasPublishReadonly bool + HasListVolumes bool + HasListVolumesPublishedNodes bool +} + +func NewControllerCapabilitySet(resp *csipbv1.ControllerGetCapabilitiesResponse) *ControllerCapabilitySet { + cs := &ControllerCapabilitySet{} + + pluginCapabilities := resp.GetCapabilities() + for _, pcap := range pluginCapabilities { + if c := pcap.GetRpc(); c != nil { + switch c.Type { + case csipbv1.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME: + cs.HasPublishUnpublishVolume = true + case csipbv1.ControllerServiceCapability_RPC_PUBLISH_READONLY: + cs.HasPublishReadonly = true + case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES: + cs.HasListVolumes = true + case csipbv1.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES: + cs.HasListVolumesPublishedNodes = true + default: + continue + } + } + } + + return cs +} + type ControllerPublishVolumeRequest struct { VolumeID string NodeID string From 564f5cec935df5ecb20cc72c563e97f68d1e1c42 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 18 Dec 2019 12:20:32 +0100 Subject: [PATCH 011/126] csimanager: Fingerprint controller capabilities --- client/pluginmanager/csimanager/instance.go | 13 ++++ .../pluginmanager/csimanager/instance_test.go | 76 +++++++++++++++++++ nomad/structs/node.go | 18 ++++- 3 files changed, 106 insertions(+), 1 deletion(-) diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 9de20ce8b..5313bc07c 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -127,6 +127,13 @@ func (i *instanceManager) runLoop() { } } +func applyCapabilitySetToControllerInfo(cs *csi.ControllerCapabilitySet, info *structs.CSIControllerInfo) { + info.SupportsReadOnlyAttach = cs.HasPublishReadonly + info.SupportsAttachDetach = cs.HasPublishUnpublishVolume + info.SupportsListVolumes = cs.HasListVolumes + info.SupportsListVolumesAttachedNodes = cs.HasListVolumesPublishedNodes +} + func (i *instanceManager) buildControllerFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { fp := base.Copy() @@ -136,6 +143,12 @@ func (i *instanceManager) buildControllerFingerprint(ctx context.Context, base * } fp.SetHealthy(healthy) + caps, err := i.client.ControllerGetCapabilities(ctx) + if err != nil { + return fp, err + } + applyCapabilitySetToControllerInfo(caps, fp.ControllerInfo) + return fp, nil } diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go index ca30a321e..ba0e2d572 100644 --- a/client/pluginmanager/csimanager/instance_test.go +++ b/client/pluginmanager/csimanager/instance_test.go @@ -157,3 +157,79 @@ func TestBuildBasicFingerprint_Node(t *testing.T) { }) } } + +func TestBuildControllerFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.ControllerCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ProbeResponse bool + ProbeErr error + ProbeCallCount int64 + + ExpectedControllerInfo *structs.CSIControllerInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.ControllerCapabilitySet{}, + CapabilitiesCallCount: 1, + + ProbeResponse: true, 
+ ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + }, + { + Name: "Successful response with capabilities", + + Capabilities: &csi.ControllerCapabilitySet{ + HasListVolumes: true, + }, + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{ + SupportsListVolumes: true, + }, + }, + { + Name: "ControllerGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextControllerGetCapabilitiesResponse = test.Capabilities + client.NextControllerGetCapabilitiesErr = test.CapabilitiesErr + + client.NextPluginProbeResponse = test.ProbeResponse + client.NextPluginProbeErr = test.ProbeErr + + info, err := im.buildControllerFingerprint(context.TODO(), &structs.CSIInfo{ControllerInfo: &structs.CSIControllerInfo{}}) + + require.Equal(t, test.ExpectedControllerInfo, info.ControllerInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.ControllerGetCapabilitiesCallCount) + require.Equal(t, test.ProbeCallCount, client.PluginProbeCallCount) + }) + } +} diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 7143c42e2..e5960dbbf 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -100,7 +100,23 @@ func (n *CSINodeInfo) Copy() *CSINodeInfo { // CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to // the Controller API. type CSIControllerInfo struct { - // Currently empty + // SupportsReadOnlyAttach is set to true when the controller returns the + // ATTACH_READONLY capability. + SupportsReadOnlyAttach bool + + // SupportsPublishVolume is true when the controller implements the methods + // required to attach and detach volumes. If this is false Nomad should skip + // the controller attachment flow. + SupportsAttachDetach bool + + // SupportsListVolums is true when the controller implements the ListVolumes + // RPC. NOTE: This does not guaruntee that attached nodes will be returned + // unless SupportsListVolumesAttachedNodes is also true. 
+ SupportsListVolumes bool + + // SupportsListVolumesAttachedNodes indicates whether the plugin will return + // attached nodes data when making ListVolume RPCs + SupportsListVolumesAttachedNodes bool } func (c *CSIControllerInfo) Copy() *CSIControllerInfo { From c16812280c4a557742036d23915713163d268b57 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 18 Dec 2019 13:18:07 +0100 Subject: [PATCH 012/126] csi: Add NodeGetCapabilities RPC --- plugins/csi/client.go | 25 ++++++++++- plugins/csi/client_test.go | 85 +++++++++++++++++++++++++++++++---- plugins/csi/fake/client.go | 13 ++++++ plugins/csi/plugin.go | 25 +++++++++++ plugins/csi/testing/client.go | 26 +++++++++++ 5 files changed, 164 insertions(+), 10 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index dd650d96b..8d45a7f71 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -60,11 +60,18 @@ type CSIControllerClient interface { ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) } +// CSINodeClient defines the minimal CSI Node Plugin interface used +// by nomad to simplify the interface required for testing. +type CSINodeClient interface { + NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) + NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) +} + type client struct { conn *grpc.ClientConn identityClient csipbv1.IdentityClient controllerClient CSIControllerClient - nodeClient csipbv1.NodeClient + nodeClient CSINodeClient } func (c *client) Close() error { @@ -243,6 +250,22 @@ func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPub // Node Endpoints // +func (c *client) NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return nil, fmt.Errorf("Client not initialized") + } + + resp, err := c.nodeClient.NodeGetCapabilities(ctx, &csipbv1.NodeGetCapabilitiesRequest{}) + if err != nil { + return nil, err + } + + return NewNodeCapabilitySet(resp), nil +} + func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) { if c == nil { return nil, fmt.Errorf("Client not initialized") diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 84973b308..1d372ad34 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -11,15 +11,17 @@ import ( "github.com/stretchr/testify/require" ) -func newTestClient() (*fake.IdentityClient, *fake.ControllerClient, CSIPlugin) { - ic := &fake.IdentityClient{} - cc := &fake.ControllerClient{} +func newTestClient() (*fake.IdentityClient, *fake.ControllerClient, *fake.NodeClient, CSIPlugin) { + ic := fake.NewIdentityClient() + cc := fake.NewControllerClient() + nc := fake.NewNodeClient() client := &client{ identityClient: ic, controllerClient: cc, + nodeClient: nc, } - return ic, cc, client + return ic, cc, nc, client } func TestClient_RPC_PluginProbe(t *testing.T) { @@ -63,7 +65,7 @@ func TestClient_RPC_PluginProbe(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, _, client := newTestClient() + ic, _, _, client := newTestClient() defer client.Close() ic.NextErr = c.ResponseErr @@ -111,7 +113,7 @@ func TestClient_RPC_PluginInfo(t *testing.T) { 
for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, _, client := newTestClient() + ic, _, _, client := newTestClient() defer client.Close() ic.NextErr = c.ResponseErr @@ -175,7 +177,7 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - ic, _, client := newTestClient() + ic, _, _, client := newTestClient() defer client.Close() ic.NextErr = c.ResponseErr @@ -273,7 +275,7 @@ func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { for _, tc := range cases { t.Run(tc.Name, func(t *testing.T) { - _, cc, client := newTestClient() + _, cc, _, client := newTestClient() defer client.Close() cc.NextErr = tc.ResponseErr @@ -289,6 +291,71 @@ func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { } } +func TestClient_RPC_NodeGetCapabilities(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeGetCapabilitiesResponse + ExpectedResponse *NodeCapabilitySet + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "ignores unknown capabilities", + Response: &csipbv1.NodeGetCapabilitiesResponse{ + Capabilities: []*csipbv1.NodeServiceCapability{ + { + Type: &csipbv1.NodeServiceCapability_Rpc{ + Rpc: &csipbv1.NodeServiceCapability_RPC{ + Type: csipbv1.NodeServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &NodeCapabilitySet{}, + }, + { + Name: "detects stage volumes capability", + Response: &csipbv1.NodeGetCapabilitiesResponse{ + Capabilities: []*csipbv1.NodeServiceCapability{ + { + Type: &csipbv1.NodeServiceCapability_Rpc{ + Rpc: &csipbv1.NodeServiceCapability_RPC{ + Type: csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME, + }, + }, + }, + }, + }, + ExpectedResponse: &NodeCapabilitySet{ + HasStageUnstageVolume: true, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = tc.ResponseErr + nc.NextCapabilitiesResponse = tc.Response + + resp, err := client.NodeGetCapabilities(context.TODO()) + if tc.ExpectedErr != nil { + require.Error(t, tc.ExpectedErr, err) + } + + require.Equal(t, tc.ExpectedResponse, resp) + }) + } +} + func TestClient_RPC_ControllerPublishVolume(t *testing.T) { cases := []struct { Name string @@ -326,7 +393,7 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { for _, c := range cases { t.Run(c.Name, func(t *testing.T) { - _, cc, client := newTestClient() + _, cc, _, client := newTestClient() defer client.Close() cc.NextErr = c.ResponseErr diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index b2372906d..eb3d2c79b 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -43,6 +43,10 @@ type Client struct { NextControllerPublishVolumeErr error ControllerPublishVolumeCallCount int64 + NextNodeGetCapabilitiesResponse *csi.NodeCapabilitySet + NextNodeGetCapabilitiesErr error + NodeGetCapabilitiesCallCount int64 + NextNodeGetInfoResponse *csi.NodeGetInfoResponse NextNodeGetInfoErr error NodeGetInfoCallCount int64 @@ -122,6 +126,15 @@ func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.Controlle return c.NextControllerPublishVolumeResponse, c.NextControllerPublishVolumeErr } +func (c *Client) NodeGetCapabilities(ctx context.Context) (*csi.NodeCapabilitySet, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + 
c.NodeGetCapabilitiesCallCount++ + + return c.NextNodeGetCapabilitiesResponse, c.NextNodeGetCapabilitiesErr +} + // NodeGetInfo is used to return semantic data about the current node in // respect to the SP. func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, error) { diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 56a813a28..7273c65f1 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -33,6 +33,10 @@ type CSIPlugin interface { // ControllerPublishVolume is used to attach a remote volume to a cluster node. ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + // NodeGetCapabilities is used to return the available capabilities from the + // Node Service. + NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) + // NodeGetInfo is used to return semantic data about the current node in // respect to the SP. NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) @@ -133,3 +137,24 @@ type ControllerPublishVolumeRequest struct { type ControllerPublishVolumeResponse struct { PublishContext map[string]string } + +type NodeCapabilitySet struct { + HasStageUnstageVolume bool +} + +func NewNodeCapabilitySet(resp *csipbv1.NodeGetCapabilitiesResponse) *NodeCapabilitySet { + cs := &NodeCapabilitySet{} + pluginCapabilities := resp.GetCapabilities() + for _, pcap := range pluginCapabilities { + if c := pcap.GetRpc(); c != nil { + switch c.Type { + case csipbv1.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME: + cs.HasStageUnstageVolume = true + default: + continue + } + } + } + + return cs +} diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index 95739de46..75f20929d 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -77,3 +77,29 @@ func (c *ControllerClient) ControllerUnpublishVolume(ctx context.Context, in *cs func (c *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *csipbv1.ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.ValidateVolumeCapabilitiesResponse, error) { panic("not implemented") // TODO: Implement } + +// NodeClient is a CSI Node client used for testing +type NodeClient struct { + NextErr error + NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse + NextGetInfoResponse *csipbv1.NodeGetInfoResponse +} + +// NewNodeClient returns a new ControllerClient +func NewNodeClient() *NodeClient { + return &NodeClient{} +} + +func (f *NodeClient) Reset() { + f.NextErr = nil + f.NextCapabilitiesResponse = nil + f.NextGetInfoResponse = nil +} + +func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { + return c.NextCapabilitiesResponse, c.NextErr +} + +func (c *NodeClient) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) { + return c.NextGetInfoResponse, c.NextErr +} From 57ae1d2cd6919938f53c4e9ff7c49b0c02ab398b Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 18 Dec 2019 13:24:39 +0100 Subject: [PATCH 013/126] csimanager: Fingerprint Node Service capabilities --- client/pluginmanager/csimanager/instance.go | 6 ++ .../pluginmanager/csimanager/instance_test.go | 61 +++++++++++++++++++ nomad/structs/node.go | 4 ++ 3 files changed, 71 insertions(+) diff --git a/client/pluginmanager/csimanager/instance.go 
b/client/pluginmanager/csimanager/instance.go index 5313bc07c..8aa4e2b06 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -161,6 +161,12 @@ func (i *instanceManager) buildNodeFingerprint(ctx context.Context, base *struct } fp.SetHealthy(healthy) + caps, err := i.client.NodeGetCapabilities(ctx) + if err != nil { + return fp, err + } + fp.NodeInfo.RequiresNodeStageVolume = caps.HasStageUnstageVolume + return fp, nil } diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go index ba0e2d572..eeed5b57f 100644 --- a/client/pluginmanager/csimanager/instance_test.go +++ b/client/pluginmanager/csimanager/instance_test.go @@ -233,3 +233,64 @@ func TestBuildControllerFingerprint(t *testing.T) { }) } } + +func TestBuildNodeFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.NodeCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ExpectedCSINodeInfo *structs.CSINodeInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.NodeCapabilitySet{}, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: false, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: &csi.NodeCapabilitySet{ + HasStageUnstageVolume: true, + }, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: true, + }, + }, + { + Name: "NodeGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{}, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextNodeGetCapabilitiesResponse = test.Capabilities + client.NextNodeGetCapabilitiesErr = test.CapabilitiesErr + + info, err := im.buildNodeFingerprint(context.TODO(), &structs.CSIInfo{NodeInfo: &structs.CSINodeInfo{}}) + + require.Equal(t, test.ExpectedCSINodeInfo, info.NodeInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.NodeGetCapabilitiesCallCount) + }) + } +} diff --git a/nomad/structs/node.go b/nomad/structs/node.go index e5960dbbf..d95039d90 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -83,6 +83,10 @@ type CSINodeInfo struct { // Indicates the node exists within the "region" "R1" and the "zone" // "Z2" within the storage provider. AccessibleTopology *CSITopology + + // RequiresNodeStageVolume indicates whether the client should Stage/Unstage + // volumes on this node. 
+ RequiresNodeStageVolume bool } func (n *CSINodeInfo) Copy() *CSINodeInfo { From 78b7784f2b500b485c70e05cc7dbadbd053b26f9 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 20 Dec 2019 12:38:44 +0100 Subject: [PATCH 014/126] api: Include CSI metadata on nodes --- api/nodes.go | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/api/nodes.go b/api/nodes.go index ec2fed2a3..a942c4545 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -464,6 +464,8 @@ type Node struct { Events []*NodeEvent Drivers map[string]*DriverInfo HostVolumes map[string]*HostVolumeInfo + CSIControllerPlugins map[string]*CSIInfo + CSINodePlugins map[string]*CSIInfo CreateIndex uint64 ModifyIndex uint64 } @@ -511,6 +513,41 @@ type NodeReservedNetworkResources struct { ReservedHostPorts string } +type CSITopology struct { + Segments map[string]string +} + +// CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Node API. +type CSINodeInfo struct { + ID string + MaxVolumes int64 + AccessibleTopology *CSITopology + RequiresNodeStageVolume bool +} + +// CSIControllerInfo is the fingerprinted data from a CSI Plugin that is specific to +// the Controller API. +type CSIControllerInfo struct { + SupportsReadOnlyAttach bool + SupportsAttachDetach bool + SupportsListVolumes bool + SupportsListVolumesAttachedNodes bool +} + +// CSIInfo is the current state of a single CSI Plugin. This is updated regularly +// as plugin health changes on the node. +type CSIInfo struct { + PluginID string + Healthy bool + HealthDescription string + UpdateTime time.Time + RequiresControllerPlugin bool + RequiresTopologies bool + ControllerInfo *CSIControllerInfo `json:",omitempty"` + NodeInfo *CSINodeInfo `json:",omitempty"` +} + // DrainStrategy describes a Node's drain behavior. type DrainStrategy struct { // DrainSpec is the user declared drain specification From 8fb312e48e36fc9cc060d9b5830d6788e051d445 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 20 Dec 2019 12:48:34 +0100 Subject: [PATCH 015/126] node_status: Add CSI Summary info This commit introduces two new fields to the basic output of `nomad node status `. 1) "CSI Controllers", which displays the names of registered controller plugins. 2) "CSI Drivers", which displays the names of registered CSI Node plugins. However, it does not implement support for verbose output, such as including health status or other fingerprinted data. 
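For illustration only, the new summary lines render alongside the existing basics roughly as follows (the node values and plugin names here are hypothetical, and the alignment assumes the CLI's usual key = value formatting):

    Drain           = false
    Eligibility     = eligible
    Status          = ready
    CSI Controllers = ebs-csi
    CSI Drivers     = ebs-csi,hostpath-csi

Multiple plugins of the same type are joined with commas.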
--- command/node_status.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/command/node_status.go b/command/node_status.go index 4bbf443b9..3f8424e76 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -299,6 +299,24 @@ func nodeDrivers(n *api.Node) []string { return drivers } +func nodeCSIControllerNames(n *api.Node) []string { + var names []string + for name := range n.CSIControllerPlugins { + names = append(names, name) + } + sort.Strings(names) + return names +} + +func nodeCSINodeNames(n *api.Node) []string { + var names []string + for name := range n.CSINodePlugins { + names = append(names, name) + } + sort.Strings(names) + return names +} + func nodeVolumeNames(n *api.Node) []string { var volumes []string for name := range n.HostVolumes { @@ -340,6 +358,8 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { fmt.Sprintf("Drain|%v", formatDrain(node)), fmt.Sprintf("Eligibility|%s", node.SchedulingEligibility), fmt.Sprintf("Status|%s", node.Status), + fmt.Sprintf("CSI Controllers|%s", strings.Join(nodeCSIControllerNames(node), ",")), + fmt.Sprintf("CSI Drivers|%s", strings.Join(nodeCSINodeNames(node), ",")), } if c.short { From 973a61935e44759d94c6d441980689d43fa51871 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Sun, 12 Jan 2020 14:54:15 +0100 Subject: [PATCH 016/126] helper: Add initial grpc logging middleware --- .../logging/client_interceptors.go | 42 +++++++++ helper/grpc-middleware/logging/options.go | 89 +++++++++++++++++++ 2 files changed, 131 insertions(+) create mode 100644 helper/grpc-middleware/logging/client_interceptors.go create mode 100644 helper/grpc-middleware/logging/options.go diff --git a/helper/grpc-middleware/logging/client_interceptors.go b/helper/grpc-middleware/logging/client_interceptors.go new file mode 100644 index 000000000..6d19036fe --- /dev/null +++ b/helper/grpc-middleware/logging/client_interceptors.go @@ -0,0 +1,42 @@ +package logging + +import ( + "context" + "path" + "time" + + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc" + "google.golang.org/grpc/status" +) + +// UnaryClientInterceptor returns a new unary client interceptor that logs the execution of gRPC calls. +func UnaryClientInterceptor(logger hclog.Logger, opts ...Option) grpc.UnaryClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + startTime := time.Now() + err := invoker(ctx, method, req, reply, cc, opts...) + emitClientLog(logger, o, method, startTime, err, "finished client unary call") + return err + } +} + +// StreamClientInterceptor returns a new streaming client interceptor that logs the execution of gRPC calls. +func StreamClientInterceptor(logger hclog.Logger, opts ...Option) grpc.StreamClientInterceptor { + o := evaluateClientOpt(opts) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + startTime := time.Now() + clientStream, err := streamer(ctx, desc, cc, method, opts...) 
+ emitClientLog(logger, o, method, startTime, err, "finished client streaming call") + return clientStream, err + } +} + +func emitClientLog(logger hclog.Logger, o *options, fullMethodString string, startTime time.Time, err error, msg string) { + code := status.Code(err) + logLevel := o.levelFunc(code) + reqDuration := time.Now().Sub(startTime) + service := path.Dir(fullMethodString)[1:] + method := path.Base(fullMethodString) + logger.Log(logLevel, msg, "grpc.code", code, "duration", reqDuration, "grpc.service", service, "grpc.method", method) +} diff --git a/helper/grpc-middleware/logging/options.go b/helper/grpc-middleware/logging/options.go new file mode 100644 index 000000000..03e63b5d3 --- /dev/null +++ b/helper/grpc-middleware/logging/options.go @@ -0,0 +1,89 @@ +package logging + +import ( + "github.com/hashicorp/go-hclog" + "google.golang.org/grpc/codes" +) + +type options struct { + levelFunc CodeToLevel +} + +var defaultOptions = &options{} + +type Option func(*options) + +func evaluateClientOpt(opts []Option) *options { + optCopy := &options{} + *optCopy = *defaultOptions + optCopy.levelFunc = DefaultCodeToLevel + for _, o := range opts { + o(optCopy) + } + return optCopy +} + +func WithStatusCodeToLevelFunc(fn CodeToLevel) Option { + return func(opts *options) { + opts.levelFunc = fn + } +} + +// CodeToLevel function defines the mapping between gRPC return codes and hclog level. +type CodeToLevel func(code codes.Code) hclog.Level + +func DefaultCodeToLevel(code codes.Code) hclog.Level { + switch code { + // Trace Logs -- Useful for Nomad developers but not necessarily always wanted + case codes.OK: + return hclog.Trace + + // Debug logs + case codes.Canceled: + return hclog.Debug + case codes.InvalidArgument: + return hclog.Debug + case codes.ResourceExhausted: + return hclog.Debug + case codes.FailedPrecondition: + return hclog.Debug + case codes.Aborted: + return hclog.Debug + case codes.OutOfRange: + return hclog.Debug + case codes.NotFound: + return hclog.Debug + case codes.AlreadyExists: + return hclog.Debug + + // Info Logs - More curious/interesting than debug, but not necessarily critical + case codes.Unknown: + return hclog.Info + case codes.DeadlineExceeded: + return hclog.Info + case codes.PermissionDenied: + return hclog.Info + case codes.Unauthenticated: + // unauthenticated requests are probably usually fine? + return hclog.Info + case codes.Unavailable: + // unavailable errors indicate the upstream is not currently available. Info + // because I would guess these are usually transient and will be handled by + // retry mechanisms before being served as a higher level warning. + return hclog.Info + + // Warn Logs - These are almost definitely bad in most cases - usually because + // the upstream is broken. + case codes.Unimplemented: + return hclog.Warn + case codes.Internal: + return hclog.Warn + case codes.DataLoss: + return hclog.Warn + + default: + // Codes that aren't implemented as part of a CodeToLevel case are probably + // unknown and should be surfaced. 
+ return hclog.Info + } +} From de5d3730015df122efd8327b40882eca37ef795e Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Sun, 12 Jan 2020 15:13:55 +0100 Subject: [PATCH 017/126] csi: Setup gRPC Clients with a logger --- .../allocrunner/taskrunner/plugin_supervisor_hook.go | 2 +- client/client.go | 4 ++-- client/pluginmanager/csimanager/instance.go | 2 +- plugins/csi/client.go | 10 +++++++--- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 5774c4548..5169275e8 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -294,7 +294,7 @@ func (h *csiPluginSupervisorHook) supervisorLoopOnce(ctx context.Context, socket return false, fmt.Errorf("failed to stat socket: %v", err) } - client, err := csi.NewClient(socketPath) + client, err := csi.NewClient(socketPath, h.logger.Named("csi_client").With("plugin.name", h.task.CSIPluginConfig.ID, "plugin.type", h.task.CSIPluginConfig.Type)) defer client.Close() if err != nil { return false, fmt.Errorf("failed to create csi client: %v", err) diff --git a/client/client.go b/client/client.go index aa9ecbf97..b67ae389e 100644 --- a/client/client.go +++ b/client/client.go @@ -343,10 +343,10 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic serversContactedOnce: sync.Once{}, dynamicRegistry: dynamicplugins.NewRegistry(map[string]dynamicplugins.PluginDispenser{ dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) { - return csi.NewClient(info.ConnectionInfo.SocketPath) + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "controller")) }, dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) { - return csi.NewClient(info.ConnectionInfo.SocketPath) + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "client")) }, }), } diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 8aa4e2b06..5240c9b8f 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -50,7 +50,7 @@ func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *d } func (i *instanceManager) run() { - c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath) + c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath, i.logger.Named("csi_client").With("plugin.name", i.info.Name, "plugin.type", i.info.Type)) if err != nil { i.logger.Error("failed to setup instance manager client", "error", err) close(i.shutdownCh) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 8d45a7f71..556f9142a 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -7,7 +7,9 @@ import ( "time" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/grpc-middleware/logging" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/shared/hclspec" "google.golang.org/grpc" @@ -81,12 +83,12 @@ func (c *client) Close() error { return nil } -func NewClient(addr string) (CSIPlugin, error) { +func NewClient(addr string, logger hclog.Logger) (CSIPlugin, error) { if addr == "" { 
return nil, fmt.Errorf("address is empty") } - conn, err := newGrpcConn(addr) + conn, err := newGrpcConn(addr, logger) if err != nil { return nil, err } @@ -99,10 +101,12 @@ func NewClient(addr string) (CSIPlugin, error) { }, nil } -func newGrpcConn(addr string) (*grpc.ClientConn, error) { +func newGrpcConn(addr string, logger hclog.Logger) (*grpc.ClientConn, error) { conn, err := grpc.Dial( addr, grpc.WithInsecure(), + grpc.WithUnaryInterceptor(logging.UnaryClientInterceptor(logger)), + grpc.WithStreamInterceptor(logging.StreamClientInterceptor(logger)), grpc.WithDialer(func(target string, timeout time.Duration) (net.Conn, error) { return net.DialTimeout("unix", target, timeout) }), From 637ce9dfade92670c630e194da0a8450eec52d69 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 14 Oct 2019 15:51:10 -0400 Subject: [PATCH 018/126] structs: new CSIVolume, request types --- nomad/structs/csi_volumes.go | 248 ++++++++++++++++++++++++++++++ nomad/structs/csi_volumes_test.go | 29 ++++ nomad/structs/errors.go | 2 + nomad/structs/node.go | 8 + nomad/structs/structs.go | 3 + 5 files changed, 290 insertions(+) create mode 100644 nomad/structs/csi_volumes.go create mode 100644 nomad/structs/csi_volumes_test.go diff --git a/nomad/structs/csi_volumes.go b/nomad/structs/csi_volumes.go new file mode 100644 index 000000000..24cfb0c50 --- /dev/null +++ b/nomad/structs/csi_volumes.go @@ -0,0 +1,248 @@ +package structs + +import ( + "fmt" + "strings" + "time" +) + +const ( + VolumeTypeCSI = "csi" +) + +type CSIVolume struct { + ID string + Driver string + Namespace string + Topology *CSITopology + MaxReaders int + MaxWriters int + + // Allocations, tracking claim status + ReadAllocs map[string]*Allocation + WriteAllocs map[string]*Allocation + PastAllocs map[string]*Allocation + + // Healthy is true iff all the denormalized plugin health fields are true, and the + // volume has not been marked for garbage collection + Healthy bool + VolumeGC time.Time + Controller *Job + ControllerHealthy bool + NodeHealthy int + NodeExpected int + + CreatedIndex uint64 + ModifiedIndex uint64 +} + +type CSIVolListStub struct { + ID string + Driver string + Namespace string + Topology *CSITopology + MaxReaders int + MaxWriters int + CurrentReaders int + CurrentWriters int + Healthy bool + VolumeGC time.Time + ControllerID string + ControllerHealthy bool + NodeHealthy int + NodeExpected int + CreatedIndex uint64 + ModifiedIndex uint64 +} + +func CreateCSIVolume(controller *Job) *CSIVolume { + return &CSIVolume{ + Controller: controller, + ReadAllocs: map[string]*Allocation{}, + WriteAllocs: map[string]*Allocation{}, + PastAllocs: map[string]*Allocation{}, + Topology: &CSITopology{}, + } +} + +func (v *CSIVolume) Stub() *CSIVolListStub { + stub := CSIVolListStub{ + ID: v.ID, + Driver: v.Driver, + Namespace: v.Namespace, + Topology: v.Topology, + MaxReaders: v.MaxReaders, + MaxWriters: v.MaxWriters, + CurrentReaders: len(v.ReadAllocs), + CurrentWriters: len(v.WriteAllocs), + Healthy: v.Healthy, + VolumeGC: v.VolumeGC, + NodeHealthy: v.NodeHealthy, + NodeExpected: v.NodeExpected, + CreatedIndex: v.CreatedIndex, + ModifiedIndex: v.ModifiedIndex, + } + + if v.Controller != nil { + stub.ControllerID = v.Controller.ID + stub.ControllerHealthy = v.Controller.Status == JobStatusRunning + } + + return &stub +} + +func (v *CSIVolume) CanReadOnly() bool { + if len(v.ReadAllocs) < v.MaxReaders { + return true + } + return false +} + +func (v *CSIVolume) CanWrite() bool { + if len(v.WriteAllocs) < v.MaxWriters { + return true + } + 
return false +} + +func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { + switch claim { + case CSIVolumeClaimRead: + return v.ClaimRead(alloc) + case CSIVolumeClaimWrite: + return v.ClaimWrite(alloc) + case CSIVolumeClaimRelease: + return v.ClaimRelease(alloc) + } + return false +} + +func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { + if !v.CanReadOnly() { + return false + } + v.ReadAllocs[alloc.ID] = alloc + delete(v.WriteAllocs, alloc.ID) + delete(v.PastAllocs, alloc.ID) + return true +} + +func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { + if !v.CanWrite() { + return false + } + v.WriteAllocs[alloc.ID] = alloc + delete(v.ReadAllocs, alloc.ID) + delete(v.PastAllocs, alloc.ID) + return true +} + +func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool { + delete(v.ReadAllocs, alloc.ID) + delete(v.WriteAllocs, alloc.ID) + v.PastAllocs[alloc.ID] = alloc + return true +} + +// GCAlloc is called on Allocation gc, by following the alloc's pointer back to the volume +func (v *CSIVolume) GCAlloc(alloc *Allocation) { + delete(v.ReadAllocs, alloc.ID) + delete(v.WriteAllocs, alloc.ID) + delete(v.PastAllocs, alloc.ID) +} + +// Equality by value +func (v *CSIVolume) Equal(o *CSIVolume) bool { + if o == nil { + return false + } + + // Omit the plugin health fields, their values are controlled by plugin jobs + return v.ID == o.ID && + v.Driver == o.Driver && + v.Namespace == o.Namespace && + v.MaxReaders == o.MaxReaders && + v.MaxWriters == o.MaxWriters && + v.Controller == o.Controller && + v.Topology.Equal(o.Topology) +} + +// Validate validates the volume struct, returning all validation errors at once +func (v *CSIVolume) Validate() error { + errs := []string{} + + if v.ID == "" { + errs = append(errs, "missing volume id") + } + if v.Driver == "" { + errs = append(errs, "missing driver") + } + if v.Namespace == "" { + errs = append(errs, "missing namespace") + } + if v.MaxReaders == 0 && v.MaxWriters == 0 { + errs = append(errs, "missing access, set max readers and/or max writers") + } + if v.Topology == nil || len(v.Topology.Segments) == 0 { + errs = append(errs, "missing topology") + } + + if len(errs) > 0 { + return fmt.Errorf("validation: %s", strings.Join(errs, ", ")) + } + return nil +} + +// Request and response wrappers +type CSIVolumeRegisterRequest struct { + Volumes []*CSIVolume + WriteRequest +} + +type CSIVolumeRegisterResponse struct { + QueryMeta +} + +type CSIVolumeDeregisterRequest struct { + VolumeIDs []string + WriteRequest +} + +type CSIVolumeDeregisterResponse struct { + QueryMeta +} + +type CSIVolumeClaimMode int + +const ( + CSIVolumeClaimRead CSIVolumeClaimMode = iota + CSIVolumeClaimWrite + CSIVolumeClaimRelease +) + +type CSIVolumeClaimRequest struct { + VolumeID string + Allocation *Allocation + Claim CSIVolumeClaimMode + WriteRequest +} + +type CSIVolumeListRequest struct { + Driver string + QueryOptions +} + +type CSIVolumeListResponse struct { + Volumes []*CSIVolListStub + QueryMeta +} + +type CSIVolumeGetRequest struct { + ID string + QueryOptions +} + +type CSIVolumeGetResponse struct { + Volume *CSIVolume + QueryMeta +} diff --git a/nomad/structs/csi_volumes_test.go b/nomad/structs/csi_volumes_test.go new file mode 100644 index 000000000..b3fdc0876 --- /dev/null +++ b/nomad/structs/csi_volumes_test.go @@ -0,0 +1,29 @@ +package structs + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCSIVolumeClaim(t *testing.T) { + vol := CreateCSIVolume(nil) + vol.MaxReaders = 1 + vol.MaxWriters = 1 + + alloc 
:= &Allocation{ID: "al"} + + vol.ClaimRead(alloc) + require.False(t, vol.CanReadOnly()) + require.True(t, vol.CanWrite()) + require.False(t, vol.ClaimRead(alloc)) + + vol.ClaimWrite(alloc) + require.True(t, vol.CanReadOnly()) + require.False(t, vol.CanWrite()) + require.False(t, vol.ClaimWrite(alloc)) + + vol.ClaimRelease(alloc) + require.True(t, vol.CanReadOnly()) + require.True(t, vol.CanWrite()) +} diff --git a/nomad/structs/errors.go b/nomad/structs/errors.go index 2e3e1edd2..e6a63bfd7 100644 --- a/nomad/structs/errors.go +++ b/nomad/structs/errors.go @@ -18,6 +18,7 @@ const ( errUnknownNomadVersion = "Unable to determine Nomad version" errNodeLacksRpc = "Node does not support RPC; requires 0.8 or later" errMissingAllocID = "Missing allocation ID" + errMissingCSIVolumeID = "Missing Volume ID" // Prefix based errors that are used to check if the error is of a given // type. These errors should be created with the associated constructor. @@ -41,6 +42,7 @@ var ( ErrUnknownNomadVersion = errors.New(errUnknownNomadVersion) ErrNodeLacksRpc = errors.New(errNodeLacksRpc) ErrMissingAllocID = errors.New(errMissingAllocID) + ErrMissingCSIVolumeID = errors.New(errMissingCSIVolumeID) ) // IsErrNoLeader returns whether the error is due to there being no leader. diff --git a/nomad/structs/node.go b/nomad/structs/node.go index d95039d90..2b1c578b9 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -54,6 +54,14 @@ func (t *CSITopology) Copy() *CSITopology { } } +func (t *CSITopology) Equal(o *CSITopology) bool { + if t == nil || o == nil { + return t == o + } + + return helper.CompareMapStringString(t.Segments, o.Segments) +} + // CSINodeInfo is the fingerprinted data from a CSI Plugin that is specific to // the Node API. type CSINodeInfo struct { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index ee475bccb..03c981794 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -86,6 +86,9 @@ const ( ClusterMetadataRequestType ServiceIdentityAccessorRegisterRequestType ServiceIdentityAccessorDeregisterRequestType + CSIVolumeRegisterRequestType + CSIVolumeDeregisterRequestType + CSIVolumeClaimRequestType ) const ( From 3a7e1b6d14af6dd2ed828cca7277fd7935150623 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 8 Jan 2020 14:52:06 -0500 Subject: [PATCH 019/126] client structs: move CSIVolumeAttachmentMode and CSIVolumeAccessMode --- client/client_csi_endpoint.go | 5 +- client/client_csi_endpoint_test.go | 19 ++-- client/structs/csi.go | 54 +--------- nomad/structs/csi_volumes.go | 157 +++++++++++++++++++++-------- 4 files changed, 135 insertions(+), 100 deletions(-) diff --git a/client/client_csi_endpoint.go b/client/client_csi_endpoint.go index 95a3ed893..2985e6b6b 100644 --- a/client/client_csi_endpoint.go +++ b/client/client_csi_endpoint.go @@ -9,6 +9,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" + s "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" ) @@ -58,11 +59,11 @@ func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAt return errors.New("NodeID is required") } - if !structs.ValidCSIVolumeAccessMode(req.AccessMode) { + if !s.ValidCSIVolumeAccessMode(req.AccessMode) { return fmt.Errorf("Unknown access mode: %v", req.AccessMode) } - if !structs.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { + if !s.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { return fmt.Errorf("Unknown attachment 
mode: %v", req.AttachmentMode) } diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go index 95e1fbf03..cac3e36fc 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/client_csi_endpoint_test.go @@ -6,6 +6,7 @@ import ( "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" + s "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/csi/fake" "github.com/stretchr/testify/require" @@ -55,7 +56,7 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: structs.CSIVolumeAccessMode("foo"), + AccessMode: s.CSIVolumeAccessMode("foo"), }, ExpectedErr: errors.New("Unknown access mode: foo"), }, @@ -65,8 +66,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, - AttachmentMode: structs.CSIVolumeAttachmentMode("bar"), + AccessMode: s.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: s.CSIVolumeAttachmentMode("bar"), }, ExpectedErr: errors.New("Unknown attachment mode: bar"), }, @@ -79,8 +80,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, }, ExpectedErr: errors.New("hello"), }, @@ -93,8 +94,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, }, @@ -109,8 +110,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: structs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ PublishContext: map[string]string{"foo": "bar"}, diff --git a/client/structs/csi.go b/client/structs/csi.go index 0e37101c0..d08fecfe0 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -1,54 +1,10 @@ package structs -import "github.com/hashicorp/nomad/plugins/csi" - -// CSIVolumeAttachmentMode chooses the type of storage api that will be used to -// interact with the device. 
-type CSIVolumeAttachmentMode string - -const ( - CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" - CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" - CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +import ( + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" ) -func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool { - switch attachmentMode { - case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem: - return true - default: - return false - } -} - -// CSIVolumeAccessMode indicates how a volume should be used in a storage topology -// e.g whether the provider should make the volume available concurrently. -type CSIVolumeAccessMode string - -const ( - CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" - - CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" - CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" - - CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" - CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" - CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" -) - -// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid, -// non-empty access mode. -func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { - switch accessMode { - case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter, - CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter, - CSIVolumeAccessModeMultiNodeMultiWriter: - return true - default: - return false - } -} - // CSIVolumeMountOptions contains the mount options that should be provided when // attaching and mounting a volume with the CSIVolumeAttachmentModeFilesystem // attachment mode. @@ -77,10 +33,10 @@ type ClientCSIControllerAttachVolumeRequest struct { // AttachmentMode indicates how the volume should be attached and mounted into // a task. - AttachmentMode CSIVolumeAttachmentMode + AttachmentMode structs.CSIVolumeAttachmentMode // AccessMode indicates the desired concurrent access model for the volume - AccessMode CSIVolumeAccessMode + AccessMode structs.CSIVolumeAccessMode // MountOptions is an optional field that contains additional configuration // when providing an AttachmentMode of CSIVolumeAttachmentModeFilesystem diff --git a/nomad/structs/csi_volumes.go b/nomad/structs/csi_volumes.go index 24cfb0c50..c0102df4b 100644 --- a/nomad/structs/csi_volumes.go +++ b/nomad/structs/csi_volumes.go @@ -10,13 +10,62 @@ const ( VolumeTypeCSI = "csi" ) +// CSIVolumeAttachmentMode chooses the type of storage api that will be used to +// interact with the device. +type CSIVolumeAttachmentMode string + +const ( + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +) + +func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool { + switch attachmentMode { + case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem: + return true + default: + return false + } +} + +// CSIVolumeAccessMode indicates how a volume should be used in a storage topology +// e.g whether the provider should make the volume available concurrently. 
+type CSIVolumeAccessMode string + +const ( + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" +) + +// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid, +// non-empty access mode. +func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { + switch accessMode { + case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter, + CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter, + CSIVolumeAccessModeMultiNodeMultiWriter: + return true + default: + return false + } +} + type CSIVolume struct { - ID string - Driver string - Namespace string - Topology *CSITopology - MaxReaders int - MaxWriters int + ID string + Driver string + Namespace string + Topologies []*CSITopology + MaxReaders int + MaxWriters int + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode // Allocations, tracking claim status ReadAllocs map[string]*Allocation @@ -27,8 +76,9 @@ type CSIVolume struct { // volume has not been marked for garbage collection Healthy bool VolumeGC time.Time - Controller *Job + ControllerName string ControllerHealthy bool + Controller []*Job NodeHealthy int NodeExpected int @@ -40,14 +90,16 @@ type CSIVolListStub struct { ID string Driver string Namespace string - Topology *CSITopology + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode MaxReaders int MaxWriters int CurrentReaders int CurrentWriters int Healthy bool VolumeGC time.Time - ControllerID string + ControllerName string ControllerHealthy bool NodeHealthy int NodeExpected int @@ -55,37 +107,36 @@ type CSIVolListStub struct { ModifiedIndex uint64 } -func CreateCSIVolume(controller *Job) *CSIVolume { +func CreateCSIVolume(controllerName string) *CSIVolume { return &CSIVolume{ - Controller: controller, - ReadAllocs: map[string]*Allocation{}, - WriteAllocs: map[string]*Allocation{}, - PastAllocs: map[string]*Allocation{}, - Topology: &CSITopology{}, + ControllerName: controllerName, + ReadAllocs: map[string]*Allocation{}, + WriteAllocs: map[string]*Allocation{}, + PastAllocs: map[string]*Allocation{}, + Topologies: []*CSITopology{}, } } func (v *CSIVolume) Stub() *CSIVolListStub { stub := CSIVolListStub{ - ID: v.ID, - Driver: v.Driver, - Namespace: v.Namespace, - Topology: v.Topology, - MaxReaders: v.MaxReaders, - MaxWriters: v.MaxWriters, - CurrentReaders: len(v.ReadAllocs), - CurrentWriters: len(v.WriteAllocs), - Healthy: v.Healthy, - VolumeGC: v.VolumeGC, - NodeHealthy: v.NodeHealthy, - NodeExpected: v.NodeExpected, - CreatedIndex: v.CreatedIndex, - ModifiedIndex: v.ModifiedIndex, - } - - if v.Controller != nil { - stub.ControllerID = v.Controller.ID - stub.ControllerHealthy = v.Controller.Status == JobStatusRunning + ID: v.ID, + Driver: v.Driver, + Namespace: v.Namespace, + Topologies: v.Topologies, + AccessMode: v.AccessMode, + AttachmentMode: v.AttachmentMode, + MaxReaders: v.MaxReaders, + MaxWriters: v.MaxWriters, + CurrentReaders: len(v.ReadAllocs), + CurrentWriters: len(v.WriteAllocs), + Healthy: v.Healthy, + VolumeGC: v.VolumeGC, + ControllerName: 
v.ControllerName, + ControllerHealthy: v.ControllerHealthy, + NodeHealthy: v.NodeHealthy, + NodeExpected: v.NodeExpected, + CreatedIndex: v.CreatedIndex, + ModifiedIndex: v.ModifiedIndex, } return &stub @@ -153,18 +204,36 @@ func (v *CSIVolume) GCAlloc(alloc *Allocation) { // Equality by value func (v *CSIVolume) Equal(o *CSIVolume) bool { - if o == nil { - return false + if v == nil || o == nil { + return v == o } // Omit the plugin health fields, their values are controlled by plugin jobs - return v.ID == o.ID && + if v.ID == o.ID && v.Driver == o.Driver && v.Namespace == o.Namespace && v.MaxReaders == o.MaxReaders && v.MaxWriters == o.MaxWriters && - v.Controller == o.Controller && - v.Topology.Equal(o.Topology) + v.AccessMode == o.AccessMode && + v.AttachmentMode == o.AttachmentMode && + v.ControllerName == o.ControllerName { + // Setwise equality of topologies + var ok bool + for _, t := range v.Topologies { + ok = false + for _, u := range o.Topologies { + if t.Equal(u) { + ok = true + break + } + } + if !ok { + return false + } + } + return true + } + return false } // Validate validates the volume struct, returning all validation errors at once @@ -183,7 +252,15 @@ func (v *CSIVolume) Validate() error { if v.MaxReaders == 0 && v.MaxWriters == 0 { errs = append(errs, "missing access, set max readers and/or max writers") } - if v.Topology == nil || len(v.Topology.Segments) == 0 { + + var ok bool + for _, t := range v.Topologies { + if t != nil && len(t.Segments) > 0 { + ok = true + break + } + } + if !ok { errs = append(errs, "missing topology") } From f9d9faf6739ad93925b0b792ccbd477d1f20189f Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 8 Jan 2020 17:27:43 -0500 Subject: [PATCH 020/126] structs: eliminate MaxReaders & MaxWriters --- nomad/structs/csi_volumes.go | 48 ++++++++++++++++++++----------- nomad/structs/csi_volumes_test.go | 10 +++---- 2 files changed, 37 insertions(+), 21 deletions(-) diff --git a/nomad/structs/csi_volumes.go b/nomad/structs/csi_volumes.go index c0102df4b..5b5cfe45f 100644 --- a/nomad/structs/csi_volumes.go +++ b/nomad/structs/csi_volumes.go @@ -57,13 +57,22 @@ func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { } } +func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { + switch accessMode { + case CSIVolumeAccessModeSingleNodeWriter, + CSIVolumeAccessModeMultiNodeSingleWriter, + CSIVolumeAccessModeMultiNodeMultiWriter: + return true + default: + return false + } +} + type CSIVolume struct { ID string Driver string Namespace string Topologies []*CSITopology - MaxReaders int - MaxWriters int AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode @@ -81,6 +90,7 @@ type CSIVolume struct { Controller []*Job NodeHealthy int NodeExpected int + ResourceExhausted time.Time CreatedIndex uint64 ModifiedIndex uint64 @@ -93,8 +103,6 @@ type CSIVolListStub struct { Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode - MaxReaders int - MaxWriters int CurrentReaders int CurrentWriters int Healthy bool @@ -125,8 +133,6 @@ func (v *CSIVolume) Stub() *CSIVolListStub { Topologies: v.Topologies, AccessMode: v.AccessMode, AttachmentMode: v.AttachmentMode, - MaxReaders: v.MaxReaders, - MaxWriters: v.MaxWriters, CurrentReaders: len(v.ReadAllocs), CurrentWriters: len(v.WriteAllocs), Healthy: v.Healthy, @@ -143,17 +149,26 @@ func (v *CSIVolume) Stub() *CSIVolListStub { } func (v *CSIVolume) CanReadOnly() bool { - if len(v.ReadAllocs) < v.MaxReaders { - return true + if !v.Healthy 
{ + return false } - return false + + return v.ResourceExhausted == time.Time{} } func (v *CSIVolume) CanWrite() bool { - if len(v.WriteAllocs) < v.MaxWriters { - return true + if !v.Healthy { + return false + } + + switch v.AccessMode { + case CSIVolumeAccessModeSingleNodeWriter, CSIVolumeAccessModeMultiNodeSingleWriter: + return len(v.WriteAllocs) == 0 + case CSIVolumeAccessModeMultiNodeMultiWriter: + return v.ResourceExhausted == time.Time{} + default: + return false } - return false } func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { @@ -212,8 +227,6 @@ func (v *CSIVolume) Equal(o *CSIVolume) bool { if v.ID == o.ID && v.Driver == o.Driver && v.Namespace == o.Namespace && - v.MaxReaders == o.MaxReaders && - v.MaxWriters == o.MaxWriters && v.AccessMode == o.AccessMode && v.AttachmentMode == o.AttachmentMode && v.ControllerName == o.ControllerName { @@ -249,8 +262,11 @@ func (v *CSIVolume) Validate() error { if v.Namespace == "" { errs = append(errs, "missing namespace") } - if v.MaxReaders == 0 && v.MaxWriters == 0 { - errs = append(errs, "missing access, set max readers and/or max writers") + if v.AccessMode == "" { + errs = append(errs, "missing access mode") + } + if v.AttachmentMode == "" { + errs = append(errs, "missing attachment mode") } var ok bool diff --git a/nomad/structs/csi_volumes_test.go b/nomad/structs/csi_volumes_test.go index b3fdc0876..85fbaf58d 100644 --- a/nomad/structs/csi_volumes_test.go +++ b/nomad/structs/csi_volumes_test.go @@ -7,16 +7,16 @@ import ( ) func TestCSIVolumeClaim(t *testing.T) { - vol := CreateCSIVolume(nil) - vol.MaxReaders = 1 - vol.MaxWriters = 1 + vol := CreateCSIVolume("") + vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter + vol.Healthy = true alloc := &Allocation{ID: "al"} vol.ClaimRead(alloc) - require.False(t, vol.CanReadOnly()) + require.True(t, vol.CanReadOnly()) require.True(t, vol.CanWrite()) - require.False(t, vol.ClaimRead(alloc)) + require.True(t, vol.ClaimRead(alloc)) vol.ClaimWrite(alloc) require.True(t, vol.CanReadOnly()) From 2eb288426cdf4b018230ca145eb54eb1ae408573 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 14 Oct 2019 16:09:27 -0400 Subject: [PATCH 021/126] docs: update checklist-rpc-endpoint --- contributing/checklist-rpc-endpoint.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contributing/checklist-rpc-endpoint.md b/contributing/checklist-rpc-endpoint.md index d6f62299d..30b5d36e6 100644 --- a/contributing/checklist-rpc-endpoint.md +++ b/contributing/checklist-rpc-endpoint.md @@ -7,7 +7,7 @@ Prefer adding a new message to changing any existing RPC messages. * [ ] `Request` struct and `*RequestType` constant in `nomad/structs/structs.go`. 
Append the constant, old constant values must remain unchanged -* [ ] In `nomad/fsm.go`, add a dispatch case to the switch statement in `Apply` +* [ ] In `nomad/fsm.go`, add a dispatch case to the switch statement in `(n *nomadFSM) Apply` * `*nomadFSM` method to decode the request and call the state method * [ ] State method for modifying objects in a `Txn` in `nomad/state/state_store.go` * `nomad/state/state_store_test.go` From 857cd37ab5d894b9fc66bdd5616f92af53a64266 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 14 Oct 2019 16:09:05 -0400 Subject: [PATCH 022/126] fsm: dispatch CSIVolume register, deregister, claim --- nomad/fsm.go | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/nomad/fsm.go b/nomad/fsm.go index c8e7f8f07..1b47c5dc5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -260,6 +260,12 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} { return n.applyUpsertSIAccessor(buf[1:], log.Index) case structs.ServiceIdentityAccessorDeregisterRequestType: return n.applyDeregisterSIAccessor(buf[1:], log.Index) + case structs.CSIVolumeRegisterRequestType: + return n.applyCSIVolumeRegister(buf[1:], log.Index) + case structs.CSIVolumeDeregisterRequestType: + return n.applyCSIVolumeDeregister(buf[1:], log.Index) + case structs.CSIVolumeClaimRequestType: + return n.applyCSIVolumeClaim(buf[1:], log.Index) } // Check enterprise only message types. @@ -1114,6 +1120,51 @@ func (n *nomadFSM) applySchedulerConfigUpdate(buf []byte, index uint64) interfac return n.state.SchedulerSetConfig(index, &req.Config) } +func (n *nomadFSM) applyCSIVolumeRegister(buf []byte, index uint64) interface{} { + var req structs.CSIVolumeRegisterRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_register"}, time.Now()) + + if err := n.state.CSIVolumeRegister(index, req.Volumes); err != nil { + n.logger.Error("CSIVolumeRegister failed", "error", err) + return err + } + + return nil +} + +func (n *nomadFSM) applyCSIVolumeDeregister(buf []byte, index uint64) interface{} { + var req structs.CSIVolumeDeregisterRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_deregister"}, time.Now()) + + if err := n.state.CSIVolumeDeregister(index, req.Namespace, req.VolumeIDs); err != nil { + n.logger.Error("CSIVolumeDeregister failed", "error", err) + return err + } + + return nil +} + +func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { + var req structs.CSIVolumeClaimRequest + if err := structs.Decode(buf, &req); err != nil { + panic(fmt.Errorf("failed to decode request: %v", err)) + } + defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now()) + + if err := n.state.CSIVolumeClaim(index, req.Namespace, req.VolumeID, req.Allocation, req.Claim); err != nil { + n.logger.Error("CSIVolumeClaim failed", "error", err) + return err + } + + return nil +} + func (n *nomadFSM) Snapshot() (raft.FSMSnapshot, error) { // Create a new snapshot snap, err := n.state.Snapshot() From 0422b967db6e82f8e2ef0e4f1caf5b3383bfd41b Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 15 Oct 2019 11:43:55 -0400 Subject: [PATCH 023/126] schema: csi_volumes schema --- nomad/state/schema.go | 44 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 
insertions(+) diff --git a/nomad/state/schema.go b/nomad/state/schema.go index a5ec55e71..ffef13f0e 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -47,6 +47,7 @@ func init() { autopilotConfigTableSchema, schedulerConfigTableSchema, clusterMetaTableSchema, + csiVolumeTableSchema, }...) } @@ -677,3 +678,46 @@ func clusterMetaTableSchema() *memdb.TableSchema { }, } } + +func csiVolumeTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "csi_volumes", + Indexes: map[string]*memdb.IndexSchema{ + // Primary index is used for volume upsert + // and simple direct lookup. ID is required to be + // unique. + "id": { + Name: "id", + AllowMissing: false, + Unique: true, + + // Use a compound so (Namespace, ID) is unique + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "ID", + }, + }, + }, + }, + "driver": { + Name: "driver", + AllowMissing: false, + Unique: false, + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "Driver", + }, + }, + }, + }, + }, + } +} From 4bb4dd98eb22a2adfb3b39a137d37c1dd6c608af Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 15 Oct 2019 11:40:40 -0400 Subject: [PATCH 024/126] state_store: CSIVolume insert, get, delete, claim state_store: change claim counts state_store: get volumes by all, by driver state_store: process volume claims state_store: csi volume register error on update --- nomad/state/state_store.go | 136 ++++++++++++++++++++++++++++++++ nomad/state/state_store_test.go | 92 +++++++++++++++++++++ 2 files changed, 228 insertions(+) diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index df3fe4d30..3d3b36197 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1511,6 +1511,142 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) return iter, nil } +// CSIVolumeRegister adds a volume to the server store, iff it's not new +func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolume) error { + txn := s.db.Txn(true) + defer txn.Abort() + + for _, v := range volumes { + // Check for volume existence + _, obj, err := txn.FirstWatch("csi_volumes", "id", v.Namespace, v.ID) + if err != nil { + return fmt.Errorf("volume existence check: %v", err) + } + if obj != nil { + return fmt.Errorf("volume exists: %s", v.ID) + } + + err = txn.Insert("csi_volumes", v) + if err != nil { + return fmt.Errorf("volume insert: %v", err) + } + } + + txn.Commit() + return nil +} + +// CSIVolumeByID is used to lookup a single volume +func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*structs.CSIVolume, error) { + txn := s.db.Txn(false) + + watchCh, obj, err := txn.FirstWatch("csi_volumes", "id", namespace, id) + if err != nil { + return nil, fmt.Errorf("volume lookup failed: %s %s %v", namespace, id, err) + } + ws.Add(watchCh) + + if obj != nil { + v := obj.(*structs.CSIVolume) + return v, nil + } + + return nil, nil +} + +// CSIVolumes looks up the entire csi_volumes table +func (s *StateStore) CSIVolumesByNS(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_volumes", "id_prefix", namespace) + if err != nil { + return nil, fmt.Errorf("volume lookup failed: %v", err) + } + ws.Add(iter.WatchCh()) + + return iter, nil +} + +// CSIVolumes looks up the entire 
csi_volumes table +func (s *StateStore) CSIVolumesByNSDriver(ws memdb.WatchSet, namespace, driver string) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_volumes", "driver", namespace, driver) + if err != nil { + return nil, fmt.Errorf("volume lookup failed: %v", err) + } + ws.Add(iter.WatchCh()) + + return iter, nil +} + +// CSIVolumes looks up the entire csi_volumes table +func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_volumes", "id") + if err != nil { + return nil, fmt.Errorf("volume lookup failed: %v", err) + } + ws.Add(iter.WatchCh()) + + return iter, nil +} + +// CSIVolumeClaim updates the volume's claim count and allocation list +func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, alloc *structs.Allocation, claim structs.CSIVolumeClaimMode) error { + txn := s.db.Txn(true) + defer txn.Abort() + + row, err := txn.First("csi_volumes", "id", namespace, id) + if err != nil { + return fmt.Errorf("volume lookup failed: %s %s: %v", namespace, id, err) + } + if row == nil { + return fmt.Errorf("volume not found: %s %s", namespace, id) + } + + volume, ok := row.(*structs.CSIVolume) + if !ok { + return fmt.Errorf("volume row conversion error") + } + + if !volume.Claim(claim, alloc) { + return fmt.Errorf("volume max claim reached") + } + + if err = txn.Insert("csi_volumes", volume); err != nil { + return fmt.Errorf("volume update failed: %s %s: %v", namespace, id, err) + } + + txn.Commit() + return nil +} + +// CSIVolumeDeregister removes the volume from the server +func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []string) error { + txn := s.db.Txn(true) + defer txn.Abort() + + for _, id := range ids { + existing, err := txn.First("csi_volumes", "id", namespace, id) + if err != nil { + return fmt.Errorf("volume lookup failed: %s %s: %v", namespace, id, err) + } + + if existing == nil { + return fmt.Errorf("volume not found: %s %s", namespace, id) + } + + if err = txn.Delete("csi_volumes", existing); err != nil { + return fmt.Errorf("volume delete failed: %s %s: %v", namespace, id, err) + } + } + + txn.Commit() + return nil +} + // UpsertPeriodicLaunch is used to register a launch or update it. 
func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.PeriodicLaunch) error { txn := s.db.Txn(true) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index fb1d51b74..a074c4ac3 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2826,6 +2826,98 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { } } +// TestStateStore_CSIVolume checks register, list and deregister for csi_volumes +func TestStateStore_CSIVolume(t *testing.T) { + state := testStateStore(t) + + v0 := structs.CreateCSIVolume("foo") + v0.ID = "DEADBEEF-70AD-4672-9178-802BCA500C87" + v0.Namespace = "default" + v0.Driver = "minnie" + v0.Healthy = true + v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + + v1 := structs.CreateCSIVolume("foo") + v1.ID = "BAADF00D-70AD-4672-9178-802BCA500C87" + v1.Namespace = "default" + v1.Driver = "adam" + v1.Healthy = true + v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + + err := state.CSIVolumeRegister(0, []*structs.CSIVolume{v0, v1}) + require.NoError(t, err) + + ws := memdb.NewWatchSet() + iter, err := state.CSIVolumes(ws) + require.NoError(t, err) + + slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) { + for { + raw := iter.Next() + if raw == nil { + break + } + vol := raw.(*structs.CSIVolume) + vs = append(vs, vol) + } + return vs + } + + vs := slurp(iter) + require.Equal(t, 2, len(vs)) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + + err = state.CSIVolumeDeregister(1, "default", []string{ + "BAADF00D-70AD-4672-9178-802BCA500C87", + }) + require.NoError(t, err) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNSDriver(ws, "default", "adam") + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 0, len(vs)) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumes(ws) + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + + // Claims + a0 := &structs.Allocation{ID: "al"} + a1 := &structs.Allocation{ID: "gator"} + r := structs.CSIVolumeClaimRead + w := structs.CSIVolumeClaimWrite + u := structs.CSIVolumeClaimRelease + + err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, r) + require.NoError(t, err) + err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a1, w) + require.NoError(t, err) + + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.False(t, vs[0].CanWrite()) + + err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, u) + require.NoError(t, err) + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + require.NoError(t, err) + vs = slurp(iter) + require.True(t, vs[0].CanReadOnly()) +} + func TestStateStore_Indexes(t *testing.T) { t.Parallel() From 03817e16d493a8ed243776702447bdd98ed36c3f Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 28 Oct 2019 11:59:28 -0400 Subject: [PATCH 025/126] acl: new NamespaceCapabilityCSIAccess, CSICreateVolume --- acl/policy.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/acl/policy.go b/acl/policy.go index bb22b45b8..b077d053e 100644 --- a/acl/policy.go +++ b/acl/policy.go 
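The hunk below adds two namespace capabilities, "csi-access" and "csi-create-volume", and wires them into capability validation. As a rough sketch of how an operator would grant them, expressed here as a Go constant holding policy text: only the capability names come from this patch, and the surrounding syntax is assumed to follow Nomad's usual namespace-capability policy format.

// exampleCSIPolicy is illustrative only: an ACL policy granting the new CSI
// capabilities on the default namespace.
const exampleCSIPolicy = `
namespace "default" {
  capabilities = ["csi-access", "csi-create-volume"]
}
`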
@@ -33,6 +33,9 @@ const ( NamespaceCapabilityAllocNodeExec = "alloc-node-exec" NamespaceCapabilityAllocLifecycle = "alloc-lifecycle" NamespaceCapabilitySentinelOverride = "sentinel-override" + NamespaceCapabilityPrivilegedTask = "privileged-task" + NamespaceCapabilityCSIAccess = "csi-access" + NamespaceCapabilityCSICreateVolume = "csi-create-volume" ) var ( @@ -122,7 +125,8 @@ func isNamespaceCapabilityValid(cap string) bool { case NamespaceCapabilityDeny, NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle, - NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec: + NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec, + NamespaceCapabilityCSIAccess, NamespaceCapabilityCSICreateVolume: return true // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride: From 8f33fb9a6d701792503151e6afc25bca86c1260b Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 16 Oct 2019 16:53:39 -0400 Subject: [PATCH 026/126] csi volume endpoint: new RPC endpoint for CSI volumes --- nomad/csi_volume_endpoint.go | 255 ++++++++++++++++++++++++++++++ nomad/csi_volume_endpoint_test.go | 231 +++++++++++++++++++++++++++ 2 files changed, 486 insertions(+) create mode 100644 nomad/csi_volume_endpoint.go create mode 100644 nomad/csi_volume_endpoint_test.go diff --git a/nomad/csi_volume_endpoint.go b/nomad/csi_volume_endpoint.go new file mode 100644 index 000000000..846cee249 --- /dev/null +++ b/nomad/csi_volume_endpoint.go @@ -0,0 +1,255 @@ +package nomad + +import ( + "fmt" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + memdb "github.com/hashicorp/go-memdb" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/nomad/state" + "github.com/hashicorp/nomad/nomad/structs" +) + +// CSIVolume wraps the structs.CSIVolume with request data and server context +type CSIVolume struct { + srv *Server + logger log.Logger +} + +// QueryACLObj looks up the ACL token in the request and returns the acl.ACL object +// - fallback to node secret ids +func (srv *Server) QueryACLObj(args *structs.QueryOptions) (*acl.ACL, error) { + if args.AuthToken == "" { + return nil, fmt.Errorf("authorization required") + } + + // Lookup the token + aclObj, err := srv.ResolveToken(args.AuthToken) + if err != nil { + // If ResolveToken had an unexpected error return that + return nil, err + } + + if aclObj == nil { + ws := memdb.NewWatchSet() + node, stateErr := srv.fsm.State().NodeBySecretID(ws, args.AuthToken) + if stateErr != nil { + // Return the original ResolveToken error with this err + var merr multierror.Error + merr.Errors = append(merr.Errors, err, stateErr) + return nil, merr.ErrorOrNil() + } + + if node == nil { + return nil, structs.ErrTokenNotFound + } + } + + return aclObj, nil +} + +// WriteACLObj calls QueryACLObj for a WriteRequest +func (srv *Server) WriteACLObj(args *structs.WriteRequest) (*acl.ACL, error) { + opts := &structs.QueryOptions{ + Region: args.RequestRegion(), + Namespace: args.RequestNamespace(), + AuthToken: args.AuthToken, + } + return srv.QueryACLObj(opts) +} + +// replyCSIVolumeIndex sets the reply with the last index that modified the table csi_volumes +func (srv *Server) replySetCSIVolumeIndex(state *state.StateStore, reply *structs.QueryMeta) error { + // Use the last index that affected the table + index, err 
:= state.Index("csi_volumes") + if err != nil { + return err + } + reply.Index = index + + // Set the query response + srv.setQueryMeta(reply) + return nil +} + +// List replies with CSIVolumes, filtered by ACL access +func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIVolumeListResponse) error { + if done, err := v.srv.forward("CSIVolume.List", args, args, reply); done { + return err + } + + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + if err != nil { + return err + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "list"}, metricsStart) + + ns := args.RequestNamespace() + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + // Query all volumes + var err error + var iter memdb.ResultIterator + + if ns == "" && args.Driver == "" { + iter, err = state.CSIVolumes(ws) + } else if args.Driver != "" { + iter, err = state.CSIVolumesByNSDriver(ws, args.Namespace, args.Driver) + } else { + iter, err = state.CSIVolumesByNS(ws, args.Namespace) + } + + if err != nil { + return err + } + + // Collect results, filter by ACL access + var vs []*structs.CSIVolListStub + cache := map[string]bool{} + + for { + raw := iter.Next() + if raw == nil { + break + } + vol := raw.(*structs.CSIVolume) + + // Filter on the request namespace to avoid ACL checks by volume + if ns != "" && vol.Namespace != args.RequestNamespace() { + continue + } + + // Cache ACL checks QUESTION: are they expensive? + allowed, ok := cache[vol.Namespace] + if !ok { + allowed = aclObj.AllowNsOp(vol.Namespace, acl.NamespaceCapabilityCSIAccess) + cache[vol.Namespace] = allowed + } + + if allowed { + vs = append(vs, vol.Stub()) + } + } + reply.Volumes = vs + return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + }} + return v.srv.blockingRPC(&opts) +} + +// Get fetches detailed information about a specific volume +func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVolumeGetResponse) error { + if done, err := v.srv.forward("CSIVolume.Get", args, args, reply); done { + return err + } + + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + if err != nil { + return err + } + + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + return structs.ErrPermissionDenied + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "get"}, metricsStart) + + ns := args.RequestNamespace() + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + vol, err := state.CSIVolumeByID(ws, ns, args.ID) + if err != nil { + return err + } + + if vol == nil { + return structs.ErrMissingCSIVolumeID + } + + reply.Volume = vol + return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + }} + return v.srv.blockingRPC(&opts) +} + +// Register registers a new volume +func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *structs.CSIVolumeRegisterResponse) error { + if done, err := v.srv.forward("CSIVolume.Register", args, args, reply); done { + return err + } + + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + if err != nil { + return err + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "register"}, metricsStart) + + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSICreateVolume) { + return 
structs.ErrPermissionDenied + } + + // This is the only namespace we ACL checked, force all the volumes to use it + for _, v := range args.Volumes { + v.Namespace = args.RequestNamespace() + if err = v.Validate(); err != nil { + return err + } + } + + state := v.srv.State() + index, err := state.LatestIndex() + if err != nil { + return err + } + + err = state.CSIVolumeRegister(index, args.Volumes) + if err != nil { + return err + } + + return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) +} + +// Deregister removes a set of volumes +func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply *structs.CSIVolumeDeregisterResponse) error { + if done, err := v.srv.forward("CSIVolume.Deregister", args, args, reply); done { + return err + } + + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + if err != nil { + return err + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "deregister"}, metricsStart) + + ns := args.RequestNamespace() + if !aclObj.AllowNsOp(ns, acl.NamespaceCapabilityCSICreateVolume) { + return structs.ErrPermissionDenied + } + + state := v.srv.State() + index, err := state.LatestIndex() + if err != nil { + return err + } + + err = state.CSIVolumeDeregister(index, ns, args.VolumeIDs) + if err != nil { + return err + } + + return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) +} diff --git a/nomad/csi_volume_endpoint_test.go b/nomad/csi_volume_endpoint_test.go new file mode 100644 index 000000000..35c3c371b --- /dev/null +++ b/nomad/csi_volume_endpoint_test.go @@ -0,0 +1,231 @@ +package nomad + +import ( + "testing" + + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" +) + +func TestCSIVolumeEndpoint_Get(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + validToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) + + codec := rpcClient(t, srv) + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Driver: "minnie", + }} + err := state.CSIVolumeRegister(0, vols) + require.NoError(t, err) + + // Create the register request + req := &structs.CSIVolumeGetRequest{ + ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + + var resp structs.CSIVolumeGetResponse + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp) + require.NoError(t, err) + require.NotEqual(t, 0, resp.Index) + require.Equal(t, vols[0].ID, resp.Volume.ID) +} + +func TestCSIVolumeEndpoint_Register(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + + state := 
srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSICreateVolume}) + validToken := mock.CreatePolicyAndToken(t, state, 1001, acl.NamespaceCapabilityCSICreateVolume, policy) + + codec := rpcClient(t, srv) + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + Namespace: "notTheNamespace", + Driver: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + + // Create the register request + req1 := &structs.CSIVolumeRegisterRequest{ + Volumes: vols, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + resp1 := &structs.CSIVolumeRegisterResponse{} + err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) + require.NoError(t, err) + require.NotEqual(t, 0, resp1.Index) + + // Get the volume back out + policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) + + req2 := &structs.CSIVolumeGetRequest{ + ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp2 := &structs.CSIVolumeGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) + require.NoError(t, err) + require.NotEqual(t, 0, resp2.Index) + require.Equal(t, vols[0].ID, resp2.Volume.ID) + + // Registration does not update + req1.Volumes[0].Driver = "adam" + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) + require.Error(t, err, "exists") + + // Deregistration works + req3 := &structs.CSIVolumeDeregisterRequest{ + VolumeIDs: []string{"DEADBEEF-70AD-4672-9178-802BCA500C87"}, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + resp3 := &structs.CSIVolumeDeregisterResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Deregister", req3, resp3) + require.NoError(t, err) + + // Volume is missing + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) + require.Error(t, err, "missing") +} + +func TestCSIVolumeEndpoint_List(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + ms := "altNamespace" + + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + nsTok := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) + codec := rpcClient(t, srv) + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Driver: "minnie", + }, { + ID: "BAADF00D-70AD-4672-9178-802BCA500C87", + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Driver: "adam", + }, { + ID: 
"BEADCEED-70AD-4672-9178-802BCA500C87", + Namespace: ms, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Driver: "paddy", + }} + err := state.CSIVolumeRegister(0, vols) + require.NoError(t, err) + + var resp structs.CSIVolumeListResponse + + // Query all, ACL only allows ns + req := &structs.CSIVolumeListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: nsTok.SecretID, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + require.NotEqual(t, 0, resp.Index) + require.Equal(t, 2, len(resp.Volumes)) + ids := map[string]bool{vols[0].ID: true, vols[1].ID: true} + for _, v := range resp.Volumes { + delete(ids, v.ID) + } + require.Equal(t, 0, len(ids)) + + // Query by Driver + req = &structs.CSIVolumeListRequest{ + Driver: "adam", + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + AuthToken: nsTok.SecretID, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + require.Equal(t, 1, len(resp.Volumes)) + require.Equal(t, vols[1].ID, resp.Volumes[0].ID) + + // Query by Driver, ACL filters all results + req = &structs.CSIVolumeListRequest{ + Driver: "paddy", + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ms, + AuthToken: nsTok.SecretID, + }, + } + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) + require.NoError(t, err) + require.Equal(t, 0, len(resp.Volumes)) +} From 04b6e7c7fbbe1154671882d074141de725a84496 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 6 Nov 2019 16:51:16 -0500 Subject: [PATCH 027/126] server: rpc register CSIVolume --- nomad/server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nomad/server.go b/nomad/server.go index 0fb95e05d..9dda975bf 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -249,6 +249,7 @@ type endpoints struct { Eval *Eval Plan *Plan Alloc *Alloc + CSIVolume *CSIVolume Deployment *Deployment Region *Region Search *Search @@ -1092,6 +1093,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.Eval = &Eval{srv: s, logger: s.logger.Named("eval")} s.staticEndpoints.Job = NewJobEndpoints(s) s.staticEndpoints.Node = &Node{srv: s, logger: s.logger.Named("client")} // Add but don't register + s.staticEndpoints.CSIVolume = &CSIVolume{srv: s, logger: s.logger.Named("csi_volume")} s.staticEndpoints.Deployment = &Deployment{srv: s, logger: s.logger.Named("deployment")} s.staticEndpoints.Operator = &Operator{srv: s, logger: s.logger.Named("operator")} s.staticEndpoints.Periodic = &Periodic{srv: s, logger: s.logger.Named("periodic")} @@ -1120,6 +1122,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { server.Register(s.staticEndpoints.Alloc) server.Register(s.staticEndpoints.Eval) server.Register(s.staticEndpoints.Job) + server.Register(s.staticEndpoints.CSIVolume) server.Register(s.staticEndpoints.Deployment) server.Register(s.staticEndpoints.Operator) server.Register(s.staticEndpoints.Periodic) From 41cbd55657ef4710bba1f0d33f12a16019d10af0 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 9 Jan 2020 11:14:07 -0500 Subject: [PATCH 028/126] client structs: use nstructs rather than s for nomad/structs --- client/client_csi_endpoint.go | 6 +++--- client/client_csi_endpoint_test.go | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/client/client_csi_endpoint.go 
b/client/client_csi_endpoint.go index 2985e6b6b..6d3efab5b 100644 --- a/client/client_csi_endpoint.go +++ b/client/client_csi_endpoint.go @@ -9,7 +9,7 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" - s "github.com/hashicorp/nomad/nomad/structs" + nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" ) @@ -59,11 +59,11 @@ func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAt return errors.New("NodeID is required") } - if !s.ValidCSIVolumeAccessMode(req.AccessMode) { + if !nstructs.ValidCSIVolumeAccessMode(req.AccessMode) { return fmt.Errorf("Unknown access mode: %v", req.AccessMode) } - if !s.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { + if !nstructs.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { return fmt.Errorf("Unknown attachment mode: %v", req.AttachmentMode) } diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go index cac3e36fc..7af35aa49 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/client_csi_endpoint_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" - s "github.com/hashicorp/nomad/nomad/structs" + nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/csi/fake" "github.com/stretchr/testify/require" @@ -56,7 +56,7 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: s.CSIVolumeAccessMode("foo"), + AccessMode: nstructs.CSIVolumeAccessMode("foo"), }, ExpectedErr: errors.New("Unknown access mode: foo"), }, @@ -66,8 +66,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: s.CSIVolumeAccessModeMultiNodeReader, - AttachmentMode: s.CSIVolumeAttachmentMode("bar"), + AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), }, ExpectedErr: errors.New("Unknown attachment mode: bar"), }, @@ -80,8 +80,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, ExpectedErr: errors.New("hello"), }, @@ -94,8 +94,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, }, @@ -110,8 +110,8 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { PluginName: fakePlugin.Name, VolumeID: "1234-4321-1234-4321", NodeID: "abcde", - AccessMode: s.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: s.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, 
ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ PublishContext: map[string]string{"foo": "bar"}, From e922531aaf25c5720662f5194864edb774cdf8c3 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 9 Jan 2020 13:39:18 -0500 Subject: [PATCH 029/126] structs: move the content of csi_volumes into csi --- nomad/structs/csi.go | 340 +++++++++++++++++ .../{csi_volumes_test.go => csi_test.go} | 0 nomad/structs/csi_volumes.go | 341 ------------------ 3 files changed, 340 insertions(+), 341 deletions(-) rename nomad/structs/{csi_volumes_test.go => csi_test.go} (100%) delete mode 100644 nomad/structs/csi_volumes.go diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index b00b2fa72..d511e9e75 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -1,5 +1,11 @@ package structs +import ( + "fmt" + "strings" + "time" +) + // CSISocketName is the filename that Nomad expects plugins to create inside the // PluginMountDir. const CSISocketName = "csi.sock" @@ -8,6 +14,9 @@ const CSISocketName = "csi.sock" // where Nomad will expect plugins to create intermediary mounts for volumes. const CSIIntermediaryDirname = "volumes" +// VolumeTypeCSI is the type in the volume stanza of a TaskGroup +const VolumeTypeCSI = "csi" + // CSIPluginType is an enum string that encapsulates the valid options for a // CSIPlugin stanza's Type. These modes will allow the plugin to be used in // different ways by the client. @@ -66,3 +75,334 @@ func (t *TaskCSIPluginConfig) Copy() *TaskCSIPluginConfig { return nt } + +// CSIVolumeAttachmentMode chooses the type of storage api that will be used to +// interact with the device. +type CSIVolumeAttachmentMode string + +const ( + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +) + +func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool { + switch attachmentMode { + case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem: + return true + default: + return false + } +} + +// CSIVolumeAccessMode indicates how a volume should be used in a storage topology +// e.g whether the provider should make the volume available concurrently. +type CSIVolumeAccessMode string + +const ( + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" +) + +// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid, +// non-empty access mode. 
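As a usage note, these mode validators are what the client controller endpoint earlier in this series leans on to reject malformed RPC arguments before they reach a plugin. A minimal standalone check might look like the sketch below; the helper name and error wording are assumptions, and it presumes the fmt import already present in this file.

// validateVolumeModes is an illustrative sketch, not part of the patch: fail
// fast on unknown or empty modes, mirroring the client CSI endpoint checks.
func validateVolumeModes(access CSIVolumeAccessMode, attach CSIVolumeAttachmentMode) error {
	if !ValidCSIVolumeAccessMode(access) {
		return fmt.Errorf("unknown access mode: %v", access)
	}
	if !ValidCSIVolumeAttachmentMode(attach) {
		return fmt.Errorf("unknown attachment mode: %v", attach)
	}
	return nil
}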
+func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { + switch accessMode { + case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter, + CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter, + CSIVolumeAccessModeMultiNodeMultiWriter: + return true + default: + return false + } +} + +// ValidCSIVolumeAccessMode checks for a writable access mode +func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { + switch accessMode { + case CSIVolumeAccessModeSingleNodeWriter, + CSIVolumeAccessModeMultiNodeSingleWriter, + CSIVolumeAccessModeMultiNodeMultiWriter: + return true + default: + return false + } +} + +type CSIVolume struct { + ID string + Driver string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + + // Allocations, tracking claim status + ReadAllocs map[string]*Allocation + WriteAllocs map[string]*Allocation + PastAllocs map[string]*Allocation + + // Healthy is true if all the denormalized plugin health fields are true, and the + // volume has not been marked for garbage collection + Healthy bool + VolumeGC time.Time + ControllerName string + ControllerHealthy bool + Controller []*Job + NodeHealthy int + NodeExpected int + ResourceExhausted time.Time + + CreatedIndex uint64 + ModifiedIndex uint64 +} + +type CSIVolListStub struct { + ID string + Driver string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + CurrentReaders int + CurrentWriters int + Healthy bool + VolumeGC time.Time + ControllerName string + ControllerHealthy bool + NodeHealthy int + NodeExpected int + CreatedIndex uint64 + ModifiedIndex uint64 +} + +func CreateCSIVolume(controllerName string) *CSIVolume { + return &CSIVolume{ + ControllerName: controllerName, + ReadAllocs: map[string]*Allocation{}, + WriteAllocs: map[string]*Allocation{}, + PastAllocs: map[string]*Allocation{}, + Topologies: []*CSITopology{}, + } +} + +func (v *CSIVolume) Stub() *CSIVolListStub { + stub := CSIVolListStub{ + ID: v.ID, + Driver: v.Driver, + Namespace: v.Namespace, + Topologies: v.Topologies, + AccessMode: v.AccessMode, + AttachmentMode: v.AttachmentMode, + CurrentReaders: len(v.ReadAllocs), + CurrentWriters: len(v.WriteAllocs), + Healthy: v.Healthy, + VolumeGC: v.VolumeGC, + ControllerName: v.ControllerName, + ControllerHealthy: v.ControllerHealthy, + NodeHealthy: v.NodeHealthy, + NodeExpected: v.NodeExpected, + CreatedIndex: v.CreatedIndex, + ModifiedIndex: v.ModifiedIndex, + } + + return &stub +} + +func (v *CSIVolume) CanReadOnly() bool { + if !v.Healthy { + return false + } + + return v.ResourceExhausted == time.Time{} +} + +func (v *CSIVolume) CanWrite() bool { + if !v.Healthy { + return false + } + + switch v.AccessMode { + case CSIVolumeAccessModeSingleNodeWriter, CSIVolumeAccessModeMultiNodeSingleWriter: + return len(v.WriteAllocs) == 0 + case CSIVolumeAccessModeMultiNodeMultiWriter: + return v.ResourceExhausted == time.Time{} + default: + return false + } +} + +func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { + switch claim { + case CSIVolumeClaimRead: + return v.ClaimRead(alloc) + case CSIVolumeClaimWrite: + return v.ClaimWrite(alloc) + case CSIVolumeClaimRelease: + return v.ClaimRelease(alloc) + } + return false +} + +func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { + if !v.CanReadOnly() { + return false + } + v.ReadAllocs[alloc.ID] = alloc + delete(v.WriteAllocs, 
alloc.ID) + delete(v.PastAllocs, alloc.ID) + return true +} + +func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { + if !v.CanWrite() { + return false + } + v.WriteAllocs[alloc.ID] = alloc + delete(v.ReadAllocs, alloc.ID) + delete(v.PastAllocs, alloc.ID) + return true +} + +func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool { + delete(v.ReadAllocs, alloc.ID) + delete(v.WriteAllocs, alloc.ID) + v.PastAllocs[alloc.ID] = alloc + return true +} + +// GCAlloc is called on Allocation gc, by following the alloc's pointer back to the volume +func (v *CSIVolume) GCAlloc(alloc *Allocation) { + delete(v.ReadAllocs, alloc.ID) + delete(v.WriteAllocs, alloc.ID) + delete(v.PastAllocs, alloc.ID) +} + +// Equality by value +func (v *CSIVolume) Equal(o *CSIVolume) bool { + if v == nil || o == nil { + return v == o + } + + // Omit the plugin health fields, their values are controlled by plugin jobs + if v.ID == o.ID && + v.Driver == o.Driver && + v.Namespace == o.Namespace && + v.AccessMode == o.AccessMode && + v.AttachmentMode == o.AttachmentMode && + v.ControllerName == o.ControllerName { + // Setwise equality of topologies + var ok bool + for _, t := range v.Topologies { + ok = false + for _, u := range o.Topologies { + if t.Equal(u) { + ok = true + break + } + } + if !ok { + return false + } + } + return true + } + return false +} + +// Validate validates the volume struct, returning all validation errors at once +func (v *CSIVolume) Validate() error { + errs := []string{} + + if v.ID == "" { + errs = append(errs, "missing volume id") + } + if v.Driver == "" { + errs = append(errs, "missing driver") + } + if v.Namespace == "" { + errs = append(errs, "missing namespace") + } + if v.AccessMode == "" { + errs = append(errs, "missing access mode") + } + if v.AttachmentMode == "" { + errs = append(errs, "missing attachment mode") + } + + var ok bool + for _, t := range v.Topologies { + if t != nil && len(t.Segments) > 0 { + ok = true + break + } + } + if !ok { + errs = append(errs, "missing topology") + } + + if len(errs) > 0 { + return fmt.Errorf("validation: %s", strings.Join(errs, ", ")) + } + return nil +} + +// Request and response wrappers +type CSIVolumeRegisterRequest struct { + Volumes []*CSIVolume + WriteRequest +} + +type CSIVolumeRegisterResponse struct { + QueryMeta +} + +type CSIVolumeDeregisterRequest struct { + VolumeIDs []string + WriteRequest +} + +type CSIVolumeDeregisterResponse struct { + QueryMeta +} + +type CSIVolumeClaimMode int + +const ( + CSIVolumeClaimRead CSIVolumeClaimMode = iota + CSIVolumeClaimWrite + CSIVolumeClaimRelease +) + +type CSIVolumeClaimRequest struct { + VolumeID string + Allocation *Allocation + Claim CSIVolumeClaimMode + WriteRequest +} + +type CSIVolumeListRequest struct { + Driver string + QueryOptions +} + +type CSIVolumeListResponse struct { + Volumes []*CSIVolListStub + QueryMeta +} + +type CSIVolumeGetRequest struct { + ID string + QueryOptions +} + +type CSIVolumeGetResponse struct { + Volume *CSIVolume + QueryMeta +} diff --git a/nomad/structs/csi_volumes_test.go b/nomad/structs/csi_test.go similarity index 100% rename from nomad/structs/csi_volumes_test.go rename to nomad/structs/csi_test.go diff --git a/nomad/structs/csi_volumes.go b/nomad/structs/csi_volumes.go deleted file mode 100644 index 5b5cfe45f..000000000 --- a/nomad/structs/csi_volumes.go +++ /dev/null @@ -1,341 +0,0 @@ -package structs - -import ( - "fmt" - "strings" - "time" -) - -const ( - VolumeTypeCSI = "csi" -) - -// CSIVolumeAttachmentMode chooses the type of storage api that 
will be used to -// interact with the device. -type CSIVolumeAttachmentMode string - -const ( - CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" - CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" - CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" -) - -func ValidCSIVolumeAttachmentMode(attachmentMode CSIVolumeAttachmentMode) bool { - switch attachmentMode { - case CSIVolumeAttachmentModeBlockDevice, CSIVolumeAttachmentModeFilesystem: - return true - default: - return false - } -} - -// CSIVolumeAccessMode indicates how a volume should be used in a storage topology -// e.g whether the provider should make the volume available concurrently. -type CSIVolumeAccessMode string - -const ( - CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" - - CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" - CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" - - CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" - CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" - CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" -) - -// ValidCSIVolumeAccessMode checks to see that the provided access mode is a valid, -// non-empty access mode. -func ValidCSIVolumeAccessMode(accessMode CSIVolumeAccessMode) bool { - switch accessMode { - case CSIVolumeAccessModeSingleNodeReader, CSIVolumeAccessModeSingleNodeWriter, - CSIVolumeAccessModeMultiNodeReader, CSIVolumeAccessModeMultiNodeSingleWriter, - CSIVolumeAccessModeMultiNodeMultiWriter: - return true - default: - return false - } -} - -func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { - switch accessMode { - case CSIVolumeAccessModeSingleNodeWriter, - CSIVolumeAccessModeMultiNodeSingleWriter, - CSIVolumeAccessModeMultiNodeMultiWriter: - return true - default: - return false - } -} - -type CSIVolume struct { - ID string - Driver string - Namespace string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode - - // Allocations, tracking claim status - ReadAllocs map[string]*Allocation - WriteAllocs map[string]*Allocation - PastAllocs map[string]*Allocation - - // Healthy is true iff all the denormalized plugin health fields are true, and the - // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - Controller []*Job - NodeHealthy int - NodeExpected int - ResourceExhausted time.Time - - CreatedIndex uint64 - ModifiedIndex uint64 -} - -type CSIVolListStub struct { - ID string - Driver string - Namespace string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode - CurrentReaders int - CurrentWriters int - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - NodeHealthy int - NodeExpected int - CreatedIndex uint64 - ModifiedIndex uint64 -} - -func CreateCSIVolume(controllerName string) *CSIVolume { - return &CSIVolume{ - ControllerName: controllerName, - ReadAllocs: map[string]*Allocation{}, - WriteAllocs: map[string]*Allocation{}, - PastAllocs: map[string]*Allocation{}, - Topologies: []*CSITopology{}, - } -} - -func (v *CSIVolume) Stub() *CSIVolListStub { - stub := CSIVolListStub{ - ID: v.ID, - Driver: v.Driver, - Namespace: v.Namespace, - Topologies: v.Topologies, - AccessMode: v.AccessMode, - AttachmentMode: 
v.AttachmentMode, - CurrentReaders: len(v.ReadAllocs), - CurrentWriters: len(v.WriteAllocs), - Healthy: v.Healthy, - VolumeGC: v.VolumeGC, - ControllerName: v.ControllerName, - ControllerHealthy: v.ControllerHealthy, - NodeHealthy: v.NodeHealthy, - NodeExpected: v.NodeExpected, - CreatedIndex: v.CreatedIndex, - ModifiedIndex: v.ModifiedIndex, - } - - return &stub -} - -func (v *CSIVolume) CanReadOnly() bool { - if !v.Healthy { - return false - } - - return v.ResourceExhausted == time.Time{} -} - -func (v *CSIVolume) CanWrite() bool { - if !v.Healthy { - return false - } - - switch v.AccessMode { - case CSIVolumeAccessModeSingleNodeWriter, CSIVolumeAccessModeMultiNodeSingleWriter: - return len(v.WriteAllocs) == 0 - case CSIVolumeAccessModeMultiNodeMultiWriter: - return v.ResourceExhausted == time.Time{} - default: - return false - } -} - -func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { - switch claim { - case CSIVolumeClaimRead: - return v.ClaimRead(alloc) - case CSIVolumeClaimWrite: - return v.ClaimWrite(alloc) - case CSIVolumeClaimRelease: - return v.ClaimRelease(alloc) - } - return false -} - -func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { - if !v.CanReadOnly() { - return false - } - v.ReadAllocs[alloc.ID] = alloc - delete(v.WriteAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) - return true -} - -func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { - if !v.CanWrite() { - return false - } - v.WriteAllocs[alloc.ID] = alloc - delete(v.ReadAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) - return true -} - -func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool { - delete(v.ReadAllocs, alloc.ID) - delete(v.WriteAllocs, alloc.ID) - v.PastAllocs[alloc.ID] = alloc - return true -} - -// GCAlloc is called on Allocation gc, by following the alloc's pointer back to the volume -func (v *CSIVolume) GCAlloc(alloc *Allocation) { - delete(v.ReadAllocs, alloc.ID) - delete(v.WriteAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) -} - -// Equality by value -func (v *CSIVolume) Equal(o *CSIVolume) bool { - if v == nil || o == nil { - return v == o - } - - // Omit the plugin health fields, their values are controlled by plugin jobs - if v.ID == o.ID && - v.Driver == o.Driver && - v.Namespace == o.Namespace && - v.AccessMode == o.AccessMode && - v.AttachmentMode == o.AttachmentMode && - v.ControllerName == o.ControllerName { - // Setwise equality of topologies - var ok bool - for _, t := range v.Topologies { - ok = false - for _, u := range o.Topologies { - if t.Equal(u) { - ok = true - break - } - } - if !ok { - return false - } - } - return true - } - return false -} - -// Validate validates the volume struct, returning all validation errors at once -func (v *CSIVolume) Validate() error { - errs := []string{} - - if v.ID == "" { - errs = append(errs, "missing volume id") - } - if v.Driver == "" { - errs = append(errs, "missing driver") - } - if v.Namespace == "" { - errs = append(errs, "missing namespace") - } - if v.AccessMode == "" { - errs = append(errs, "missing access mode") - } - if v.AttachmentMode == "" { - errs = append(errs, "missing attachment mode") - } - - var ok bool - for _, t := range v.Topologies { - if t != nil && len(t.Segments) > 0 { - ok = true - break - } - } - if !ok { - errs = append(errs, "missing topology") - } - - if len(errs) > 0 { - return fmt.Errorf("validation: %s", strings.Join(errs, ", ")) - } - return nil -} - -// Request and response wrappers -type CSIVolumeRegisterRequest struct { - Volumes []*CSIVolume - WriteRequest -} 
- -type CSIVolumeRegisterResponse struct { - QueryMeta -} - -type CSIVolumeDeregisterRequest struct { - VolumeIDs []string - WriteRequest -} - -type CSIVolumeDeregisterResponse struct { - QueryMeta -} - -type CSIVolumeClaimMode int - -const ( - CSIVolumeClaimRead CSIVolumeClaimMode = iota - CSIVolumeClaimWrite - CSIVolumeClaimRelease -) - -type CSIVolumeClaimRequest struct { - VolumeID string - Allocation *Allocation - Claim CSIVolumeClaimMode - WriteRequest -} - -type CSIVolumeListRequest struct { - Driver string - QueryOptions -} - -type CSIVolumeListResponse struct { - Volumes []*CSIVolListStub - QueryMeta -} - -type CSIVolumeGetRequest struct { - ID string - QueryOptions -} - -type CSIVolumeGetResponse struct { - Volume *CSIVolume - QueryMeta -} From 5b31b140c3b08ddeed424ec0508f43b1ddfbdd30 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 9 Jan 2020 16:28:14 -0500 Subject: [PATCH 030/126] csi: do not use namespace specific identifiers --- nomad/csi_volume_endpoint.go | 13 ++++------ nomad/fsm.go | 4 +-- nomad/state/schema.go | 24 +++--------------- nomad/state/state_store.go | 45 ++++++++++++--------------------- nomad/state/state_store_test.go | 16 ++++++------ 5 files changed, 35 insertions(+), 67 deletions(-) diff --git a/nomad/csi_volume_endpoint.go b/nomad/csi_volume_endpoint.go index 846cee249..e10b9a55e 100644 --- a/nomad/csi_volume_endpoint.go +++ b/nomad/csi_volume_endpoint.go @@ -98,12 +98,10 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV var err error var iter memdb.ResultIterator - if ns == "" && args.Driver == "" { - iter, err = state.CSIVolumes(ws) - } else if args.Driver != "" { - iter, err = state.CSIVolumesByNSDriver(ws, args.Namespace, args.Driver) + if args.Driver != "" { + iter, err = state.CSIVolumesByDriver(ws, args.Driver) } else { - iter, err = state.CSIVolumesByNS(ws, args.Namespace) + iter, err = state.CSIVolumes(ws) } if err != nil { @@ -161,12 +159,11 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "get"}, metricsStart) - ns := args.RequestNamespace() opts := blockingOptions{ queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, run: func(ws memdb.WatchSet, state *state.StateStore) error { - vol, err := state.CSIVolumeByID(ws, ns, args.ID) + vol, err := state.CSIVolumeByID(ws, args.ID) if err != nil { return err } @@ -246,7 +243,7 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return err } - err = state.CSIVolumeDeregister(index, ns, args.VolumeIDs) + err = state.CSIVolumeDeregister(index, args.VolumeIDs) if err != nil { return err } diff --git a/nomad/fsm.go b/nomad/fsm.go index 1b47c5dc5..38fecb3a5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1142,7 +1142,7 @@ func (n *nomadFSM) applyCSIVolumeDeregister(buf []byte, index uint64) interface{ } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_deregister"}, time.Now()) - if err := n.state.CSIVolumeDeregister(index, req.Namespace, req.VolumeIDs); err != nil { + if err := n.state.CSIVolumeDeregister(index, req.VolumeIDs); err != nil { n.logger.Error("CSIVolumeDeregister failed", "error", err) return err } @@ -1157,7 +1157,7 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now()) - if err := n.state.CSIVolumeClaim(index, req.Namespace, req.VolumeID, req.Allocation, 
req.Claim); err != nil { + if err := n.state.CSIVolumeClaim(index, req.VolumeID, req.Allocation, req.Claim); err != nil { n.logger.Error("CSIVolumeClaim failed", "error", err) return err } diff --git a/nomad/state/schema.go b/nomad/state/schema.go index ffef13f0e..3ea7321b8 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -690,32 +690,16 @@ func csiVolumeTableSchema() *memdb.TableSchema { Name: "id", AllowMissing: false, Unique: true, - - // Use a compound so (Namespace, ID) is unique - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "ID", - }, - }, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", }, }, "driver": { Name: "driver", AllowMissing: false, Unique: false, - Indexer: &memdb.CompoundIndex{ - Indexes: []memdb.Indexer{ - &memdb.StringFieldIndex{ - Field: "Namespace", - }, - &memdb.StringFieldIndex{ - Field: "Driver", - }, - }, + Indexer: &memdb.StringFieldIndex{ + Field: "Driver", }, }, }, diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 3d3b36197..74e1085ee 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1518,7 +1518,7 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum for _, v := range volumes { // Check for volume existence - _, obj, err := txn.FirstWatch("csi_volumes", "id", v.Namespace, v.ID) + _, obj, err := txn.FirstWatch("csi_volumes", "id", v.ID) if err != nil { return fmt.Errorf("volume existence check: %v", err) } @@ -1537,12 +1537,12 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum } // CSIVolumeByID is used to lookup a single volume -func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*structs.CSIVolume, error) { +func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVolume, error) { txn := s.db.Txn(false) - watchCh, obj, err := txn.FirstWatch("csi_volumes", "id", namespace, id) + watchCh, obj, err := txn.FirstWatch("csi_volumes", "id", id) if err != nil { - return nil, fmt.Errorf("volume lookup failed: %s %s %v", namespace, id, err) + return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) } ws.Add(watchCh) @@ -1555,23 +1555,10 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*st } // CSIVolumes looks up the entire csi_volumes table -func (s *StateStore) CSIVolumesByNS(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) { +func (s *StateStore) CSIVolumesByDriver(ws memdb.WatchSet, driver string) (memdb.ResultIterator, error) { txn := s.db.Txn(false) - iter, err := txn.Get("csi_volumes", "id_prefix", namespace) - if err != nil { - return nil, fmt.Errorf("volume lookup failed: %v", err) - } - ws.Add(iter.WatchCh()) - - return iter, nil -} - -// CSIVolumes looks up the entire csi_volumes table -func (s *StateStore) CSIVolumesByNSDriver(ws memdb.WatchSet, namespace, driver string) (memdb.ResultIterator, error) { - txn := s.db.Txn(false) - - iter, err := txn.Get("csi_volumes", "driver", namespace, driver) + iter, err := txn.Get("csi_volumes", "driver", driver) if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } @@ -1594,16 +1581,16 @@ func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) } // CSIVolumeClaim updates the volume's claim count and allocation list -func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, alloc *structs.Allocation, claim 
structs.CSIVolumeClaimMode) error { +func (s *StateStore) CSIVolumeClaim(index uint64, id string, alloc *structs.Allocation, claim structs.CSIVolumeClaimMode) error { txn := s.db.Txn(true) defer txn.Abort() - row, err := txn.First("csi_volumes", "id", namespace, id) + row, err := txn.First("csi_volumes", "id", id) if err != nil { - return fmt.Errorf("volume lookup failed: %s %s: %v", namespace, id, err) + return fmt.Errorf("volume lookup failed: %s: %v", id, err) } if row == nil { - return fmt.Errorf("volume not found: %s %s", namespace, id) + return fmt.Errorf("volume not found: %s", id) } volume, ok := row.(*structs.CSIVolume) @@ -1616,7 +1603,7 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, alloc *s } if err = txn.Insert("csi_volumes", volume); err != nil { - return fmt.Errorf("volume update failed: %s %s: %v", namespace, id, err) + return fmt.Errorf("volume update failed: %s: %v", id, err) } txn.Commit() @@ -1624,22 +1611,22 @@ func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, alloc *s } // CSIVolumeDeregister removes the volume from the server -func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []string) error { +func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { txn := s.db.Txn(true) defer txn.Abort() for _, id := range ids { - existing, err := txn.First("csi_volumes", "id", namespace, id) + existing, err := txn.First("csi_volumes", "id", id) if err != nil { - return fmt.Errorf("volume lookup failed: %s %s: %v", namespace, id, err) + return fmt.Errorf("volume lookup failed: %s: %v", id, err) } if existing == nil { - return fmt.Errorf("volume not found: %s %s", namespace, id) + return fmt.Errorf("volume not found: %s", id) } if err = txn.Delete("csi_volumes", existing); err != nil { - return fmt.Errorf("volume delete failed: %s %s: %v", namespace, id, err) + return fmt.Errorf("volume delete failed: %s: %v", id, err) } } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index a074c4ac3..b5666927e 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2869,18 +2869,18 @@ func TestStateStore_CSIVolume(t *testing.T) { require.Equal(t, 2, len(vs)) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + iter, err = state.CSIVolumesByDriver(ws, "minnie") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 1, len(vs)) - err = state.CSIVolumeDeregister(1, "default", []string{ + err = state.CSIVolumeDeregister(1, []string{ "BAADF00D-70AD-4672-9178-802BCA500C87", }) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByNSDriver(ws, "default", "adam") + iter, err = state.CSIVolumesByDriver(ws, "adam") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 0, len(vs)) @@ -2898,21 +2898,21 @@ func TestStateStore_CSIVolume(t *testing.T) { w := structs.CSIVolumeClaimWrite u := structs.CSIVolumeClaimRelease - err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, r) + err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, r) require.NoError(t, err) - err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a1, w) + err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a1, w) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + iter, err = state.CSIVolumesByDriver(ws, "minnie") require.NoError(t, err) vs = slurp(iter) 
require.False(t, vs[0].CanWrite()) - err = state.CSIVolumeClaim(2, "default", "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, u) + err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, u) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByNSDriver(ws, "default", "minnie") + iter, err = state.CSIVolumesByDriver(ws, "minnie") require.NoError(t, err) vs = slurp(iter) require.True(t, vs[0].CanReadOnly()) From 6106a388e605a8c7028b65d2831ce587f1e03226 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 18 Dec 2019 13:14:42 -0500 Subject: [PATCH 031/126] api: csi --- api/csi.go | 152 ++++++++++++++++++++++++++++++++++++++++++++++++ api/csi_test.go | 69 ++++++++++++++++++++++ 2 files changed, 221 insertions(+) create mode 100644 api/csi.go create mode 100644 api/csi_test.go diff --git a/api/csi.go b/api/csi.go new file mode 100644 index 000000000..5168614ef --- /dev/null +++ b/api/csi.go @@ -0,0 +1,152 @@ +package api + +import ( + "sort" + "time" +) + +// CSIVolumes is used to query the top level csi volumes +type CSIVolumes struct { + client *Client +} + +// CSIVolumes returns a handle on the allocs endpoints. +func (c *Client) CSIVolumes() *CSIVolumes { + return &CSIVolumes{client: c} +} + +// List returns all CSI volumes, ignoring driver +func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, error) { + var resp []*CSIVolumeListStub + qm, err := v.client.query("/v1/csi/volumes", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(CSIVolumeIndexSort(resp)) + return resp, qm, nil +} + +// DriverList returns all CSI volumes for the specified driver +func (v *CSIVolumes) DriverList(driver string) ([]*CSIVolumeListStub, *QueryMeta, error) { + return v.List(&QueryOptions{Prefix: driver}) +} + +// Info is used to retrieve a single allocation. 
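The client wrapper in this file mirrors the register / list / info / deregister flow that the package test further down walks through. A compressed sketch of typical use from application code follows; the agent address, token handling, and all volume values are assumptions, and this is not part of the patch.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

// Illustrative only: drive the CSIVolumes API wrapper against a local agent.
func main() {
	client, err := api.NewClient(api.DefaultConfig()) // address and ACL token from the environment
	if err != nil {
		log.Fatal(err)
	}
	vols := client.CSIVolumes()
	q := &api.QueryOptions{Namespace: "default"}
	w := &api.WriteOptions{Namespace: "default"}

	// Register a volume, then read it back.
	err = vols.Register(&api.CSIVolume{
		ID:             "example-volume",
		Driver:         "example-driver",
		Namespace:      "default",
		AccessMode:     api.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: api.CSIVolumeAttachmentModeFilesystem,
		Topologies:     []*api.CSITopology{{Segments: map[string]string{"rack": "r1"}}},
	}, w)
	if err != nil {
		log.Fatal(err)
	}

	stubs, _, err := vols.List(q)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stubs {
		fmt.Println(s.ID, s.Driver, s.Healthy)
	}

	vol, _, err := vols.Info("example-volume", q)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vol.AccessMode, vol.AttachmentMode)

	if err := vols.Deregister("example-volume", w); err != nil {
		log.Fatal(err)
	}
}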
+func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, error) { + var resp CSIVolume + qm, err := v.client.query("/v1/csi/volume/"+id, &resp, q) + if err != nil { + return nil, nil, err + } + return &resp, qm, nil +} + +func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) error { + req := CSIVolumeRegisterRequest{ + Volumes: []*CSIVolume{vol}, + } + var resp struct{} + _, err := v.client.write("/v1/csi/volume/"+vol.ID, req, &resp, w) + return err +} + +func (v *CSIVolumes) Deregister(id string, w *WriteOptions) error { + _, err := v.client.delete("/v1/csi/volume/"+id, nil, w) + return err +} + +// CSIVolumeAttachmentMode duplicated in nomad/structs/csi.go +type CSIVolumeAttachmentMode string + +const ( + CSIVolumeAttachmentModeUnknown CSIVolumeAttachmentMode = "" + CSIVolumeAttachmentModeBlockDevice CSIVolumeAttachmentMode = "block-device" + CSIVolumeAttachmentModeFilesystem CSIVolumeAttachmentMode = "file-system" +) + +// CSIVolumeAccessMode duplicated in nomad/structs/csi.go +type CSIVolumeAccessMode string + +const ( + CSIVolumeAccessModeUnknown CSIVolumeAccessMode = "" + + CSIVolumeAccessModeSingleNodeReader CSIVolumeAccessMode = "single-node-reader-only" + CSIVolumeAccessModeSingleNodeWriter CSIVolumeAccessMode = "single-node-writer" + + CSIVolumeAccessModeMultiNodeReader CSIVolumeAccessMode = "multi-node-reader-only" + CSIVolumeAccessModeMultiNodeSingleWriter CSIVolumeAccessMode = "multi-node-single-writer" + CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" +) + +// CSIVolume is used for serialization, see also nomad/structs/csi.go +type CSIVolume struct { + ID string + Driver string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + + // Combine structs.{Read,Write,Past}Allocs + Allocations []*AllocationListStub + + // Healthy is true iff all the denormalized plugin health fields are true, and the + // volume has not been marked for garbage collection + Healthy bool + VolumeGC time.Time + ControllerName string + ControllerHealthy bool + NodeHealthy int + NodeExpected int + ResourceExhausted time.Time + + CreatedIndex uint64 + ModifiedIndex uint64 +} + +type CSIVolumeIndexSort []*CSIVolumeListStub + +func (v CSIVolumeIndexSort) Len() int { + return len(v) +} + +func (v CSIVolumeIndexSort) Less(i, j int) bool { + return v[i].CreatedIndex > v[j].CreatedIndex +} + +func (v CSIVolumeIndexSort) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} + +// CSIVolumeListStub omits allocations. 
See also nomad/structs/csi.go +type CSIVolumeListStub struct { + ID string + Driver string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + + // Healthy is true iff all the denormalized plugin health fields are true, and the + // volume has not been marked for garbage collection + Healthy bool + VolumeGC time.Time + ControllerName string + ControllerHealthy bool + NodeHealthy int + NodeExpected int + ResourceExhausted time.Time + + CreatedIndex uint64 + ModifiedIndex uint64 +} + +type CSIVolumeRegisterRequest struct { + Volumes []*CSIVolume + WriteRequest +} + +type CSIVolumeDeregisterRequest struct { + VolumeIDs []string + WriteRequest +} diff --git a/api/csi_test.go b/api/csi_test.go new file mode 100644 index 000000000..4e13ed4f9 --- /dev/null +++ b/api/csi_test.go @@ -0,0 +1,69 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCSIVolumes_CRUD(t *testing.T) { + t.Parallel() + c, s, root := makeACLClient(t, nil, nil) + defer s.Stop() + v := c.CSIVolumes() + + // Successful empty result + vols, qm, err := v.List(nil) + assert.NoError(t, err) + assert.NotEqual(t, 0, qm.LastIndex) + assert.Equal(t, 0, len(vols)) + + // Authorized QueryOpts. Use the root token to just bypass ACL details + opts := &QueryOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + wpts := &WriteOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + // Register a volume + v.Register(&CSIVolume{ + ID: "DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", + Driver: "minnie", + Namespace: "default", + AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: CSIVolumeAttachmentModeFilesystem, + Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}}, + }, wpts) + + // Successful result with volumes + vols, qm, err = v.List(opts) + assert.NoError(t, err) + assert.NotEqual(t, 0, qm.LastIndex) + assert.Equal(t, 1, len(vols)) + + // Successful info query + vol, qm, err := v.Info("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", opts) + assert.NoError(t, err) + assert.Equal(t, "minnie", vol.Driver) + assert.Equal(t, "bar", vol.Topologies[0].Segments["foo"]) + + // Deregister the volume + err = v.Deregister("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", wpts) + assert.NoError(t, err) + + // Successful empty result + vols, qm, err = v.List(nil) + assert.NoError(t, err) + assert.NotEqual(t, 0, qm.LastIndex) + assert.Equal(t, 0, len(vols)) + + // Failed info query + vol, qm, err = v.Info("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", opts) + assert.Error(t, err, "missing") +} From 2f646fa5e9ad9617ab2f49adbae45851b5643b63 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 18 Dec 2019 17:20:30 -0500 Subject: [PATCH 032/126] agent: csi endpoint --- command/agent/csi_endpoint.go | 118 ++++++++++++++++++++++++++++++++++ command/agent/http.go | 3 + 2 files changed, 121 insertions(+) create mode 100644 command/agent/csi_endpoint.go diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go new file mode 100644 index 000000000..705157ec6 --- /dev/null +++ b/command/agent/csi_endpoint.go @@ -0,0 +1,118 @@ +package agent + +import ( + "net/http" + "strings" + + "github.com/hashicorp/nomad/nomad/structs" +) + +func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args := structs.CSIVolumeListRequest{} + + if 
s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIVolumeListResponse + if err := s.agent.RPC("CSIVolume.List", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + return out.Volumes, nil +} + +// CSIVolumeSpecificRequest dispatches GET and PUT +func (s *HTTPServer) CSIVolumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/csi/volume/") + + // tokenize the suffix of the path to get the alloc id and find the action + // invoked on the alloc id + tokens := strings.Split(reqSuffix, "/") + if len(tokens) > 2 || len(tokens) < 1 { + return nil, CodedError(404, resourceNotFoundErr) + } + id := tokens[0] + + switch req.Method { + case "GET": + return s.csiVolumeGet(id, resp, req) + case "PUT": + return s.csiVolumePut(id, resp, req) + case "DELETE": + return s.csiVolumeDelete(id, resp, req) + default: + return nil, CodedError(405, ErrInvalidMethod) + } +} + +func (s *HTTPServer) csiVolumeGet(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + args := structs.CSIVolumeGetRequest{ + ID: id, + } + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIVolumeGetResponse + if err := s.agent.RPC("CSIVolume.Get", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.Volume == nil { + return nil, CodedError(404, "alloc not found") + } + + return out.Volume, nil +} + +func (s *HTTPServer) csiVolumePut(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "PUT" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args0 := structs.CSIVolumeRegisterRequest{} + if err := decodeBody(req, &args0); err != nil { + return err, CodedError(400, err.Error()) + } + + args := structs.CSIVolumeRegisterRequest{ + Volumes: args0.Volumes, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.CSIVolumeRegisterResponse + if err := s.agent.RPC("CSIVolume.Register", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + + return nil, nil +} + +func (s *HTTPServer) csiVolumeDelete(id string, resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "DELETE" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args := structs.CSIVolumeDeregisterRequest{ + VolumeIDs: []string{id}, + } + s.parseWriteRequest(req, &args.WriteRequest) + + var out structs.CSIVolumeDeregisterResponse + if err := s.agent.RPC("CSIVolume.Deregister", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + + return nil, nil +} diff --git a/command/agent/http.go b/command/agent/http.go index 910bb74ef..57100eca1 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -253,6 +253,9 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/deployments", s.wrap(s.DeploymentsRequest)) s.mux.HandleFunc("/v1/deployment/", s.wrap(s.DeploymentSpecificRequest)) + s.mux.HandleFunc("/v1/csi/volumes", s.wrap(s.CSIVolumesRequest)) + s.mux.HandleFunc("/v1/csi/volume/", s.wrap(s.CSIVolumeSpecificRequest)) + s.mux.HandleFunc("/v1/acl/policies", s.wrap(s.ACLPoliciesRequest)) s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest)) From 334979a75400ff579ec222382a32fb4c4f3d19ba Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Fri, 3 Jan 2020 11:55:40 -0500 Subject: [PATCH 033/126] nomad/rpc: 
indicate missing region in error message --- nomad/rpc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nomad/rpc.go b/nomad/rpc.go index 29461eb40..e5ab6f2a7 100644 --- a/nomad/rpc.go +++ b/nomad/rpc.go @@ -511,7 +511,7 @@ func (r *rpcHandler) forward(method string, info structs.RPCInfo, args interface region := info.RequestRegion() if region == "" { - return true, fmt.Errorf("missing target RPC") + return true, fmt.Errorf("missing region for target RPC") } // Handle region forwarding From af857ae6604fa87cd4e253638d3cdee4c0988c5a Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 2 Jan 2020 14:47:26 -0500 Subject: [PATCH 034/126] docs contributing: checklist-command --- contributing/checklist-command.md | 1 + 1 file changed, 1 insertion(+) diff --git a/contributing/checklist-command.md b/contributing/checklist-command.md index 399767616..6d99b24d1 100644 --- a/contributing/checklist-command.md +++ b/contributing/checklist-command.md @@ -19,6 +19,7 @@ CLI (command/) -> API Client (api/) -> HTTP API (command/agent) -> RPC (nomad/) * [ ] Implement `-verbose` (expands truncated UUIDs, adds other detail) * [ ] Update help text * [ ] Implement and test new HTTP endpoint in `command/agent/_endpoint.go` +* [ ] Register new URL paths in `command/agent/http.go` * [ ] Implement and test new RPC endpoint in `nomad/_endpoint.go` * [ ] Implement and test new Client RPC endpoint in `client/_endpoint.go` (For client endpoints like Filesystem only) From 61cfc806ad07bb3ca615e119d4ef29b1689788bb Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 16 Jan 2020 10:40:40 -0500 Subject: [PATCH 035/126] csi_volume_endpoint_test: gen uuids to avoid t.Parallel conflicts --- nomad/csi_volume_endpoint_test.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/nomad/csi_volume_endpoint_test.go b/nomad/csi_volume_endpoint_test.go index 35c3c371b..a66cc5477 100644 --- a/nomad/csi_volume_endpoint_test.go +++ b/nomad/csi_volume_endpoint_test.go @@ -5,6 +5,7 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" @@ -29,9 +30,11 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { codec := rpcClient(t, srv) + id0 := uuid.Generate() + // Create the volume vols := []*structs.CSIVolume{{ - ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + ID: id0, Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, @@ -42,7 +45,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { // Create the register request req := &structs.CSIVolumeGetRequest{ - ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + ID: id0, QueryOptions: structs.QueryOptions{ Region: "global", Namespace: ns, @@ -75,9 +78,11 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { codec := rpcClient(t, srv) + id0 := uuid.Generate() + // Create the volume vols := []*structs.CSIVolume{{ - ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + ID: id0, Namespace: "notTheNamespace", Driver: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, @@ -106,7 +111,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) req2 := &structs.CSIVolumeGetRequest{ - ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + ID: id0, QueryOptions: structs.QueryOptions{ Region: "global", 
AuthToken: getToken.SecretID, @@ -125,7 +130,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { // Deregistration works req3 := &structs.CSIVolumeDeregisterRequest{ - VolumeIDs: []string{"DEADBEEF-70AD-4672-9178-802BCA500C87"}, + VolumeIDs: []string{id0}, WriteRequest: structs.WriteRequest{ Region: "global", Namespace: ns, @@ -160,21 +165,25 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { nsTok := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) codec := rpcClient(t, srv) + id0 := uuid.Generate() + id1 := uuid.Generate() + id2 := uuid.Generate() + // Create the volume vols := []*structs.CSIVolume{{ - ID: "DEADBEEF-70AD-4672-9178-802BCA500C87", + ID: id0, Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, Driver: "minnie", }, { - ID: "BAADF00D-70AD-4672-9178-802BCA500C87", + ID: id1, Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, Driver: "adam", }, { - ID: "BEADCEED-70AD-4672-9178-802BCA500C87", + ID: id2, Namespace: ms, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, From ab1edd4e249dd03ace7bbb528c6c027670b4f1ad Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 23 Jan 2020 12:11:59 +0100 Subject: [PATCH 036/126] csi: Add Nomad Model for VolumeCapabilities This commit introduces a nomad model for interacting with CSI VolumeCapabilities as a pre-requisite for implementing NodeStageVolume and NodeMountVolume correctly. These fields have a few special characteristics that I've tried to model here - specificially, we make a basic attempt to avoid printing data that should be redacted during debug logs (additional mount flags), and also attempt to make debuggability of other integer fields easier by implementing the fmt.Stringer and fmt.GoStringer interfaces as necessary. We do not currnetly implement a CSI Protobuf -> Nomad implementation transformation as this is currently not needed by any used RPCs. 
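As a rough illustration of the intended redaction behaviour (a minimal sketch, not part of the change below; it only assumes the plugins/csi package introduced by this patch), formatting a VolumeMountOptions value with the standard fmt verbs should hide the raw mount flags:

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/nomad/plugins/csi"
    )

    func main() {
    	opts := &csi.VolumeMountOptions{
    		FSType:     "ext4",
    		MountFlags: []string{"ro", "credential=hunter2"}, // example flags, possibly sensitive
    	}

    	// Both the Stringer and GoStringer implementations replace the mount
    	// flags with a placeholder so they cannot leak into debug logs.
    	fmt.Printf("%v\n", opts)  // csi.VolumeMountOptions(FSType: ext4, MountFlags: [REDACTED])
    	fmt.Printf("%#v\n", opts) // csi.VolumeMountOptions(FSType: ext4, MountFlags: [REDACTED])
    }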
--- plugins/csi/plugin.go | 104 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 7273c65f1..d48471c64 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -2,6 +2,7 @@ package csi import ( "context" + "fmt" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "github.com/hashicorp/nomad/plugins/base" @@ -158,3 +159,106 @@ func NewNodeCapabilitySet(resp *csipbv1.NodeGetCapabilitiesResponse) *NodeCapabi return cs } + +// VolumeAccessMode represents the desired access mode of the CSI Volume +type VolumeAccessMode csipbv1.VolumeCapability_AccessMode_Mode + +var _ fmt.Stringer = VolumeAccessModeUnknown + +var ( + VolumeAccessModeUnknown = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_UNKNOWN) + VolumeAccessModeSingleNodeWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_WRITER) + VolumeAccessModeSingleNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY) + VolumeAccessModeMultiNodeReaderOnly = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY) + VolumeAccessModeMultiNodeSingleWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER) + VolumeAccessModeMultiNodeMultiWriter = VolumeAccessMode(csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER) +) + +func (a VolumeAccessMode) String() string { + return a.ToCSIRepresentation().String() +} + +func (a VolumeAccessMode) ToCSIRepresentation() csipbv1.VolumeCapability_AccessMode_Mode { + return csipbv1.VolumeCapability_AccessMode_Mode(a) +} + +// VolumeAccessType represents the filesystem apis that the user intends to use +// with the volume. E.g whether it will be used as a block device or if they wish +// to have a mounted filesystem. +type VolumeAccessType int32 + +var _ fmt.Stringer = VolumeAccessTypeBlock + +var ( + VolumeAccessTypeBlock VolumeAccessType = 1 + VolumeAccessTypeMount VolumeAccessType = 2 +) + +func (v VolumeAccessType) String() string { + if v == VolumeAccessTypeBlock { + return "VolumeAccessType.Block" + } else if v == VolumeAccessTypeMount { + return "VolumeAccessType.Mount" + } else { + return "VolumeAccessType.Unspecified" + } +} + +// VolumeMountOptions contain optional additional configuration that can be used +// when specifying that a Volume should be used with VolumeAccessTypeMount. +type VolumeMountOptions struct { + // FSType is an optional field that allows an operator to specify the type + // of the filesystem. + FSType string + + // MountFlags contains additional options that may be used when mounting the + // volume by the plugin. This may contain sensitive data and should not be + // leaked. + MountFlags []string +} + +// VolumeMountOptions implements the Stringer and GoStringer interfaces to prevent +// accidental leakage of sensitive mount flags via logs. 
+var _ fmt.Stringer = &VolumeMountOptions{} +var _ fmt.GoStringer = &VolumeMountOptions{} + +func (v *VolumeMountOptions) String() string { + mountFlagsString := "nil" + if len(v.MountFlags) != 0 { + mountFlagsString = "[REDACTED]" + } + + return fmt.Sprintf("csi.VolumeMountOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString) +} + +func (v *VolumeMountOptions) GoString() string { + return v.String() +} + +// VolumeCapability describes the overall usage requirements for a given CSI Volume +type VolumeCapability struct { + AccessType VolumeAccessType + AccessMode VolumeAccessMode + VolumeMountOptions *VolumeMountOptions +} + +func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { + vc := &csipbv1.VolumeCapability{ + AccessMode: &csipbv1.VolumeCapability_AccessMode{ + Mode: c.AccessMode.ToCSIRepresentation(), + }, + } + + if c.AccessType == VolumeAccessTypeMount { + vc.AccessType = &csipbv1.VolumeCapability_Mount{ + Mount: &csipbv1.VolumeCapability_MountVolume{ + FsType: c.VolumeMountOptions.FSType, + MountFlags: c.VolumeMountOptions.MountFlags, + }, + } + } else { + vc.AccessType = &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}} + } + + return vc +} From 317b6807447a715312a1c5890978404c24d93dbe Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 23 Jan 2020 12:20:36 +0100 Subject: [PATCH 037/126] csi: Add csi.NodeStageVolume to the NodeClient Implements a fake version of NodeStageVolume as a dependency of implementing the client.NodeStageVolume request --- plugins/csi/client.go | 2 ++ plugins/csi/testing/client.go | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 556f9142a..08add6719 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -67,6 +67,8 @@ type CSIControllerClient interface { type CSINodeClient interface { NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) + NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) + // NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) } type client struct { diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index 75f20929d..f341634a8 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -83,6 +83,7 @@ type NodeClient struct { NextErr error NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse NextGetInfoResponse *csipbv1.NodeGetInfoResponse + NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse } // NewNodeClient returns a new ControllerClient @@ -103,3 +104,7 @@ func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGe func (c *NodeClient) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) { return c.NextGetInfoResponse, c.NextErr } + +func (c *NodeClient) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) { + return c.NextStageVolumeResponse, c.NextErr +} From 07651a52314ebf93266b629134398a634712f03b Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 23 Jan 
2020 12:24:37 +0100 Subject: [PATCH 038/126] csi: Add NodeStageVolume RPC --- plugins/csi/client.go | 30 ++++++++++++++++++++++++++++++ plugins/csi/client_test.go | 37 +++++++++++++++++++++++++++++++++++++ plugins/csi/plugin.go | 5 +++++ 3 files changed, 72 insertions(+) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 08add6719..ea89be8ed 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -296,3 +296,33 @@ func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) return result, nil } + +func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + // These errors should not be returned during production use but exist as aids + // during Nomad Development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if stagingTargetPath == "" { + return fmt.Errorf("missing stagingTargetPath") + } + + req := &csipbv1.NodeStageVolumeRequest{ + VolumeId: volumeID, + PublishContext: publishContext, + StagingTargetPath: stagingTargetPath, + VolumeCapability: capabilities.ToCSIRepresentation(), + } + + // NodeStageVolume's response contains no extra data. If err == nil, we were + // successful. + _, err := c.nodeClient.NodeStageVolume(ctx, req) + return err +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 1d372ad34..a98e0318b 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -408,3 +408,40 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { }) } } + +func TestClient_RPC_NodeStageVolume(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeStageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextStageVolumeResponse = c.Response + + err := client.NodeStageVolume(context.TODO(), "foo", nil, "/foo", &VolumeCapability{}) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index d48471c64..3a01587ed 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -42,6 +42,11 @@ type CSIPlugin interface { // respect to the SP. NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) + // NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability + // to prepare a volume for usage on a host. If err == nil, the response should + // be assumed to be successful. + NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability) error + // Shutdown the client and ensure any connections are cleaned up. 
Close() error } From f208770e948bdf57dc660f5d6f76bf8bcb71e235 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 23 Jan 2020 12:28:57 +0100 Subject: [PATCH 039/126] csi: Add NodeStageVolume to fake client --- plugins/csi/fake/client.go | 15 +++++++++++++++ plugins/csi/testing/client.go | 1 + 2 files changed, 16 insertions(+) diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index eb3d2c79b..d25b054b6 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -50,6 +50,9 @@ type Client struct { NextNodeGetInfoResponse *csi.NodeGetInfoResponse NextNodeGetInfoErr error NodeGetInfoCallCount int64 + + NextNodeStageVolumeErr error + NodeStageVolumeCallCount int64 } // PluginInfo describes the type and version of a plugin. @@ -146,6 +149,18 @@ func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, err return c.NextNodeGetInfoResponse, c.NextNodeGetInfoErr } +// NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to prepare a volume for usage on a host. If err == nil, the response should +// be assumed to be successful. +func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *csi.VolumeCapability) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeStageVolumeCallCount++ + + return c.NextNodeStageVolumeErr +} + // Shutdown the client and ensure any connections are cleaned up. func (c *Client) Close() error { return nil diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index f341634a8..91de486ae 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -95,6 +95,7 @@ func (f *NodeClient) Reset() { f.NextErr = nil f.NextCapabilitiesResponse = nil f.NextGetInfoResponse = nil + f.NextStageVolumeResponse = nil } func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { From 5c447396fa45db021536dce93c10d126cf796063 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 24 Jan 2020 14:23:02 +0100 Subject: [PATCH 040/126] csi: Add NodeUnstageVolume as a CSI Dependency --- plugins/csi/client.go | 2 +- plugins/csi/testing/client.go | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index ea89be8ed..6c9d26c39 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -68,7 +68,7 @@ type CSINodeClient interface { NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) - // NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) } type client struct { diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index 91de486ae..77d2fd3e0 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -80,10 +80,11 @@ func (c *ControllerClient) 
ValidateVolumeCapabilities(ctx context.Context, in *c // NodeClient is a CSI Node client used for testing type NodeClient struct { - NextErr error - NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse - NextGetInfoResponse *csipbv1.NodeGetInfoResponse - NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse + NextErr error + NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse + NextGetInfoResponse *csipbv1.NodeGetInfoResponse + NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse + NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse } // NewNodeClient returns a new ControllerClient @@ -96,6 +97,7 @@ func (f *NodeClient) Reset() { f.NextCapabilitiesResponse = nil f.NextGetInfoResponse = nil f.NextStageVolumeResponse = nil + f.NextUnstageVolumeResponse = nil } func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { @@ -109,3 +111,7 @@ func (c *NodeClient) NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoReq func (c *NodeClient) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) { return c.NextStageVolumeResponse, c.NextErr } + +func (c *NodeClient) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) { + return c.NextUnstageVolumeResponse, c.NextErr +} From 98f00a9220aab0dea4c18813300800b68b8f4f6a Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 24 Jan 2020 14:34:04 +0100 Subject: [PATCH 041/126] csi: Add NodeUnstageVolume RPCs to CSIPlugin --- plugins/csi/client.go | 28 ++++++++++++++++++++++++++++ plugins/csi/client_test.go | 37 +++++++++++++++++++++++++++++++++++++ plugins/csi/fake/client.go | 17 +++++++++++++++++ plugins/csi/plugin.go | 7 +++++++ 4 files changed, 89 insertions(+) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 6c9d26c39..b3442d7d4 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -326,3 +326,31 @@ func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishCo _, err := c.nodeClient.NodeStageVolume(ctx, req) return err } + +func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + // These errors should not be returned during production use but exist as aids + // during Nomad Development + if volumeID == "" { + return fmt.Errorf("missing volumeID") + } + if stagingTargetPath == "" { + return fmt.Errorf("missing stagingTargetPath") + } + + req := &csipbv1.NodeUnstageVolumeRequest{ + VolumeId: volumeID, + StagingTargetPath: stagingTargetPath, + } + + // NodeUnstageVolume's response contains no extra data. If err == nil, we were + // successful. 
+ _, err := c.nodeClient.NodeUnstageVolume(ctx, req) + return err +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index a98e0318b..237f362cc 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -445,3 +445,40 @@ func TestClient_RPC_NodeStageVolume(t *testing.T) { }) } } + +func TestClient_RPC_NodeUnstageVolume(t *testing.T) { + cases := []struct { + Name string + ResponseErr error + Response *csipbv1.NodeUnstageVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + ResponseErr: nil, + ExpectedErr: nil, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextUnstageVolumeResponse = c.Response + + err := client.NodeUnstageVolume(context.TODO(), "foo", "/foo") + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index d25b054b6..29b9b9081 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -53,6 +53,9 @@ type Client struct { NextNodeStageVolumeErr error NodeStageVolumeCallCount int64 + + NextNodeUnstageVolumeErr error + NodeUnstageVolumeCallCount int64 } // PluginInfo describes the type and version of a plugin. @@ -161,6 +164,20 @@ func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishCo return c.NextNodeStageVolumeErr } +// NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability +// to undo the work performed by NodeStageVolume. If a volume has been staged, +// this RPC must be called before freeing the volume. +// +// If err == nil, the response should be assumed to be successful. +func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnstageVolumeCallCount++ + + return c.NextNodeUnstageVolumeErr +} + // Shutdown the client and ensure any connections are cleaned up. func (c *Client) Close() error { return nil diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 3a01587ed..9e9b622ef 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -47,6 +47,13 @@ type CSIPlugin interface { // be assumed to be successful. NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability) error + // NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability + // to undo the work performed by NodeStageVolume. If a volume has been staged, + // this RPC must be called before freeing the volume. + // + // If err == nil, the response should be assumed to be successful. + NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error + // Shutdown the client and ensure any connections are cleaned up. 
Close() error } From 02c4612e65464ce5881426265b8ec9ace916dd69 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 24 Jan 2020 17:20:23 +0100 Subject: [PATCH 042/126] csi: Add NodePublishVolume RPCs --- plugins/csi/client.go | 20 +++++++++- plugins/csi/client_test.go | 57 ++++++++++++++++++++++++++++ plugins/csi/fake/client.go | 12 ++++++ plugins/csi/plugin.go | 70 +++++++++++++++++++++++++++++++++++ plugins/csi/testing/client.go | 8 +++- 5 files changed, 165 insertions(+), 2 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index b3442d7d4..80685b9fb 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -69,6 +69,7 @@ type CSINodeClient interface { NodeGetInfo(ctx context.Context, in *csipbv1.NodeGetInfoRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetInfoResponse, error) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) } type client struct { @@ -334,7 +335,6 @@ func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, staging if c.nodeClient == nil { return fmt.Errorf("Client not initialized") } - // These errors should not be returned during production use but exist as aids // during Nomad Development if volumeID == "" { @@ -354,3 +354,21 @@ func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, staging _, err := c.nodeClient.NodeUnstageVolume(ctx, req) return err } + +func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + if err := req.Validate(); err != nil { + return fmt.Errorf("validation error: %v", err) + } + + // NodePublishVolume's response contains no extra data. If err == nil, we were + // successful. 
+ _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation()) + return err +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 237f362cc..23da49b77 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -2,6 +2,7 @@ package csi import ( "context" + "errors" "fmt" "testing" @@ -482,3 +483,59 @@ func TestClient_RPC_NodeUnstageVolume(t *testing.T) { }) } } + +func TestClient_RPC_NodePublishVolume(t *testing.T) { + cases := []struct { + Name string + Request *NodePublishVolumeRequest + ResponseErr error + Response *csipbv1.NodePublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + Request: &NodePublishVolumeRequest{ + VolumeID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + Request: &NodePublishVolumeRequest{ + VolumeID: "foo", + TargetPath: "/dev/null", + VolumeCapability: &VolumeCapability{}, + }, + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the publish volume request", + Request: &NodePublishVolumeRequest{ + VolumeID: "", + }, + ResponseErr: nil, + ExpectedErr: errors.New("missing VolumeID"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextPublishVolumeResponse = c.Response + + err := client.NodePublishVolume(context.TODO(), c.Request) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 29b9b9081..808e7bb37 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -56,6 +56,9 @@ type Client struct { NextNodeUnstageVolumeErr error NodeUnstageVolumeCallCount int64 + + NextNodePublishVolumeErr error + NodePublishVolumeCallCount int64 } // PluginInfo describes the type and version of a plugin. @@ -178,6 +181,15 @@ func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, staging return c.NextNodeUnstageVolumeErr } +func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodePublishVolumeCallCount++ + + return c.NextNodePublishVolumeErr +} + // Shutdown the client and ensure any connections are cleaned up. func (c *Client) Close() error { return nil diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 9e9b622ef..a9eb6b5ce 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -2,6 +2,7 @@ package csi import ( "context" + "errors" "fmt" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" @@ -54,10 +55,79 @@ type CSIPlugin interface { // If err == nil, the response should be assumed to be successful. NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error + // NodePublishVolume is used to prepare a volume for use by an allocation. + // if err == nil the response should be assumed to be successful. + NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) error + // Shutdown the client and ensure any connections are cleaned up. Close() error } +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. 
+ VolumeID string + + // If the volume was attached via a call to `ControllerPublishVolume` then + // we need to provide the returned PublishContext here. + PublishContext map[string]string + + // The path to which the volume was staged by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // E.g {the plugins internal mount path}/staging/volumeid/... + // + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + StagingTargetPath string + + // The path to which the volume will be published. + // It MUST be an absolute path in the root filesystem of the process serving this + // request. + // E.g {the plugins internal mount path}/per-alloc/allocid/volumeid/... + // + // The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the parent directory of this path exists + // and that the process serving the request has `read` and `write` + // permissions to that parent directory. + TargetPath string + + // Volume capability describing how the CO intends to use this volume. + VolumeCapability *VolumeCapability + + Readonly bool + + // Reserved for future use. + Secrets map[string]string +} + +func (r *NodePublishVolumeRequest) ToCSIRepresentation() *csipbv1.NodePublishVolumeRequest { + return &csipbv1.NodePublishVolumeRequest{ + VolumeId: r.VolumeID, + PublishContext: r.PublishContext, + StagingTargetPath: r.StagingTargetPath, + TargetPath: r.TargetPath, + VolumeCapability: r.VolumeCapability.ToCSIRepresentation(), + Readonly: r.Readonly, + Secrets: r.Secrets, + } +} + +func (r *NodePublishVolumeRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + + if r.TargetPath == "" { + return errors.New("missing TargetPath") + } + + if r.VolumeCapability == nil { + return errors.New("missing VolumeCapabilities") + } + + return nil +} + type PluginCapabilitySet struct { hasControllerService bool hasTopologies bool diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index 77d2fd3e0..f1d22f19d 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -85,9 +85,10 @@ type NodeClient struct { NextGetInfoResponse *csipbv1.NodeGetInfoResponse NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse + NextPublishVolumeResponse *csipbv1.NodePublishVolumeResponse } -// NewNodeClient returns a new ControllerClient +// NewNodeClient returns a new stub NodeClient func NewNodeClient() *NodeClient { return &NodeClient{} } @@ -98,6 +99,7 @@ func (f *NodeClient) Reset() { f.NextGetInfoResponse = nil f.NextStageVolumeResponse = nil f.NextUnstageVolumeResponse = nil + f.NextPublishVolumeResponse = nil } func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { @@ -115,3 +117,7 @@ func (c *NodeClient) NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageV func (c *NodeClient) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) { return c.NextUnstageVolumeResponse, c.NextErr } + +func (c *NodeClient) NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) { + return c.NextPublishVolumeResponse, c.NextErr +} From 
a4b96aff33946d01b3aa0ce83a65d64d04fa9e85 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Sat, 25 Jan 2020 10:39:21 +0100 Subject: [PATCH 043/126] csi: Nil check ToCSIRepresentation implementations --- plugins/csi/plugin.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index a9eb6b5ce..cd5ad7555 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -101,6 +101,10 @@ type NodePublishVolumeRequest struct { } func (r *NodePublishVolumeRequest) ToCSIRepresentation() *csipbv1.NodePublishVolumeRequest { + if r == nil { + return nil + } + return &csipbv1.NodePublishVolumeRequest{ VolumeId: r.VolumeID, PublishContext: r.PublishContext, @@ -325,6 +329,10 @@ type VolumeCapability struct { } func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { + if c == nil { + return nil + } + vc := &csipbv1.VolumeCapability{ AccessMode: &csipbv1.VolumeCapability_AccessMode{ Mode: c.AccessMode.ToCSIRepresentation(), From 51270ae0f411dfd1adae69faa8dcb23cf647776e Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 27 Jan 2020 11:17:10 +0100 Subject: [PATCH 044/126] csi: Support for NodeUnpublishVolume RPCs --- plugins/csi/client.go | 28 ++++++++++++++++++ plugins/csi/client_test.go | 53 +++++++++++++++++++++++++++++++++++ plugins/csi/fake/client.go | 12 ++++++++ plugins/csi/plugin.go | 5 ++++ plugins/csi/testing/client.go | 18 ++++++++---- 5 files changed, 110 insertions(+), 6 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 80685b9fb..0766c8e9a 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -70,6 +70,7 @@ type CSINodeClient interface { NodeStageVolume(ctx context.Context, in *csipbv1.NodeStageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeStageVolumeResponse, error) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnstageVolumeResponse, error) NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) } type client struct { @@ -372,3 +373,30 @@ func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRe _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation()) return err } + +func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.nodeClient == nil { + return fmt.Errorf("Client not initialized") + } + + if volumeID == "" { + return fmt.Errorf("missing VolumeID") + } + + if targetPath == "" { + return fmt.Errorf("missing TargetPath") + } + + req := &csipbv1.NodeUnpublishVolumeRequest{ + VolumeId: volumeID, + TargetPath: targetPath, + } + + // NodeUnpublishVolume's response contains no extra data. If err == nil, we were + // successful. 
+ _, err := c.nodeClient.NodeUnpublishVolume(ctx, req) + return err +} diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 23da49b77..d458c58c5 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -539,3 +539,56 @@ func TestClient_RPC_NodePublishVolume(t *testing.T) { }) } } +func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { + cases := []struct { + Name string + VolumeID string + TargetPath string + ResponseErr error + Response *csipbv1.NodeUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "handles underlying grpc errors", + VolumeID: "foo", + TargetPath: "/dev/null", + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "handles success", + VolumeID: "foo", + TargetPath: "/dev/null", + ResponseErr: nil, + ExpectedErr: nil, + }, + { + Name: "Performs validation of the request args - VolumeID", + ResponseErr: nil, + ExpectedErr: errors.New("missing VolumeID"), + }, + { + Name: "Performs validation of the request args - TargetPath", + VolumeID: "foo", + ResponseErr: nil, + ExpectedErr: errors.New("missing TargetPath"), + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, _, nc, client := newTestClient() + defer client.Close() + + nc.NextErr = c.ResponseErr + nc.NextUnpublishVolumeResponse = c.Response + + err := client.NodeUnpublishVolume(context.TODO(), c.VolumeID, c.TargetPath) + if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } else { + require.Nil(t, err) + } + }) + } +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 808e7bb37..c8e0cfd55 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -59,6 +59,9 @@ type Client struct { NextNodePublishVolumeErr error NodePublishVolumeCallCount int64 + + NextNodeUnpublishVolumeErr error + NodeUnpublishVolumeCallCount int64 } // PluginInfo describes the type and version of a plugin. @@ -190,6 +193,15 @@ func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu return c.NextNodePublishVolumeErr } +func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.NodeUnpublishVolumeCallCount++ + + return c.NextNodeUnpublishVolumeErr +} + // Shutdown the client and ensure any connections are cleaned up. func (c *Client) Close() error { return nil diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index cd5ad7555..172aa01d7 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -59,6 +59,11 @@ type CSIPlugin interface { // if err == nil the response should be assumed to be successful. NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) error + // NodeUnpublishVolume is used to cleanup usage of a volume for an alloc. This + // MUST be called before calling NodeUnstageVolume or ControllerUnpublishVolume + // for the given volume. + NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error + // Shutdown the client and ensure any connections are cleaned up. 
Close() error } diff --git a/plugins/csi/testing/client.go b/plugins/csi/testing/client.go index f1d22f19d..f28d9287e 100644 --- a/plugins/csi/testing/client.go +++ b/plugins/csi/testing/client.go @@ -80,12 +80,13 @@ func (c *ControllerClient) ValidateVolumeCapabilities(ctx context.Context, in *c // NodeClient is a CSI Node client used for testing type NodeClient struct { - NextErr error - NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse - NextGetInfoResponse *csipbv1.NodeGetInfoResponse - NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse - NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse - NextPublishVolumeResponse *csipbv1.NodePublishVolumeResponse + NextErr error + NextCapabilitiesResponse *csipbv1.NodeGetCapabilitiesResponse + NextGetInfoResponse *csipbv1.NodeGetInfoResponse + NextStageVolumeResponse *csipbv1.NodeStageVolumeResponse + NextUnstageVolumeResponse *csipbv1.NodeUnstageVolumeResponse + NextPublishVolumeResponse *csipbv1.NodePublishVolumeResponse + NextUnpublishVolumeResponse *csipbv1.NodeUnpublishVolumeResponse } // NewNodeClient returns a new stub NodeClient @@ -100,6 +101,7 @@ func (f *NodeClient) Reset() { f.NextStageVolumeResponse = nil f.NextUnstageVolumeResponse = nil f.NextPublishVolumeResponse = nil + f.NextUnpublishVolumeResponse = nil } func (c *NodeClient) NodeGetCapabilities(ctx context.Context, in *csipbv1.NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*csipbv1.NodeGetCapabilitiesResponse, error) { @@ -121,3 +123,7 @@ func (c *NodeClient) NodeUnstageVolume(ctx context.Context, in *csipbv1.NodeUnst func (c *NodeClient) NodePublishVolume(ctx context.Context, in *csipbv1.NodePublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodePublishVolumeResponse, error) { return c.NextPublishVolumeResponse, c.NextErr } + +func (c *NodeClient) NodeUnpublishVolume(ctx context.Context, in *csipbv1.NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*csipbv1.NodeUnpublishVolumeResponse, error) { + return c.NextUnpublishVolumeResponse, c.NextErr +} From 1a10433b97115104eb7580048e32a881a00495e0 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 8 Jan 2020 13:47:07 +0100 Subject: [PATCH 045/126] csi: Add VolumeManager (#6920) This changeset is some pre-requisite boilerplate that is required for introducing CSI volume management for client nodes. It extracts out fingerprinting logic from the csi instance manager. This change is to facilitate reusing the csimanager to also manage the node-local CSI functionality, as it is the easiest place for us to guaruntee health checking and to provide additional visibility into the running operations through the fingerprinter mechanism and goroutine. It also introduces the VolumeMounter interface that will be used to manage staging/publishing unstaging/unpublishing of volumes on the host. 
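To make the intent of the VolumeMounter interface concrete, the sketch below shows roughly how an alloc runner hook is expected to consume it once volume attachment is wired up. The method names and signatures here are illustrative assumptions only; the real definitions live in client/pluginmanager/csimanager/interface.go and volume.go in this change.

    package csimanager

    // Hypothetical sketch: the names and signatures below are assumptions
    // for illustration, not the definitions added by this patch.

    import (
    	"context"

    	"github.com/hashicorp/nomad/nomad/structs"
    )

    // VolumeMounter stages and publishes a CSI volume on the host and hands
    // back the host path that a task runner can bind-mount into the task.
    type VolumeMounter interface {
    	MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (hostPath string, err error)
    	UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error
    }

    // A csi_hook-style caller would then, for each CSI volume request in the
    // task group, look up a mounter for the volume's plugin (assumed helper),
    // call MountVolume during Prerun, and UnmountVolume during Postrun.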
--- client/allocrunner/alloc_runner.go | 7 + client/allocrunner/alloc_runner_hooks.go | 1 + client/allocrunner/config.go | 5 + client/allocrunner/csi_hook.go | 46 +++ .../taskrunner/plugin_supervisor_hook.go | 14 +- client/allocrunner/taskrunner/task_runner.go | 8 + client/allocrunner/taskrunner/volume_hook.go | 70 ++++- client/client.go | 6 +- client/dynamicplugins/registry.go | 4 + client/pluginmanager/csimanager/doc.go | 15 + .../pluginmanager/csimanager/fingerprint.go | 157 ++++++++++ .../csimanager/fingerprint_test.go | 277 +++++++++++++++++ client/pluginmanager/csimanager/instance.go | 207 +++++-------- .../pluginmanager/csimanager/instance_test.go | 284 +----------------- client/pluginmanager/csimanager/interface.go | 34 +++ client/pluginmanager/csimanager/manager.go | 21 +- .../pluginmanager/csimanager/manager_test.go | 6 +- client/pluginmanager/csimanager/volume.go | 64 ++++ 18 files changed, 787 insertions(+), 439 deletions(-) create mode 100644 client/allocrunner/csi_hook.go create mode 100644 client/pluginmanager/csimanager/doc.go create mode 100644 client/pluginmanager/csimanager/fingerprint.go create mode 100644 client/pluginmanager/csimanager/fingerprint_test.go create mode 100644 client/pluginmanager/csimanager/interface.go create mode 100644 client/pluginmanager/csimanager/volume.go diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index 9c8286c2d..4cdf4ee00 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/nomad/client/devicemanager" "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" @@ -139,6 +140,10 @@ type allocRunner struct { // plugins). 
dynamicRegistry dynamicplugins.Registry + // csiManager is used to wait for CSI Volumes to be attached, and by the task + // runner to manage their mounting + csiManager csimanager.Manager + // devicemanager is used to mount devices as well as lookup device // statistics devicemanager devicemanager.Manager @@ -184,6 +189,7 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { prevAllocWatcher: config.PrevAllocWatcher, prevAllocMigrator: config.PrevAllocMigrator, dynamicRegistry: config.DynamicRegistry, + csiManager: config.CSIManager, devicemanager: config.DeviceManager, driverManager: config.DriverManager, serversContactedCh: config.ServersContactedCh, @@ -229,6 +235,7 @@ func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error { ConsulSI: ar.sidsClient, Vault: ar.vaultClient, DeviceStatsReporter: ar.deviceStatsReporter, + CSIManager: ar.csiManager, DeviceManager: ar.devicemanager, DriverManager: ar.driverManager, ServersContactedCh: ar.serversContactedCh, diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 81c1e7cee..793ec530b 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -134,6 +134,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { logger: hookLogger, }), newConsulSockHook(hookLogger, alloc, ar.allocDir, config.ConsulConfig), + newCSIHook(hookLogger, alloc), } return nil diff --git a/client/allocrunner/config.go b/client/allocrunner/config.go index 4893c9604..257d8db69 100644 --- a/client/allocrunner/config.go +++ b/client/allocrunner/config.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/nomad/client/devicemanager" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/client/vaultclient" @@ -53,6 +54,10 @@ type Config struct { // plugins). DynamicRegistry dynamicplugins.Registry + // CSIManager is used to wait for CSI Volumes to be attached, and by the task + // runner to manage their mounting + CSIManager csimanager.Manager + // DeviceManager is used to mount devices as well as lookup device // statistics DeviceManager devicemanager.Manager diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go new file mode 100644 index 000000000..cccd66397 --- /dev/null +++ b/client/allocrunner/csi_hook.go @@ -0,0 +1,46 @@ +package allocrunner + +import ( + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/nomad/structs" +) + +// csiHook will wait for remote csi volumes to be attached to the host before +// continuing. +// +// It is a noop for allocs that do not depend on CSI Volumes. 
+type csiHook struct { + alloc *structs.Allocation + logger hclog.Logger +} + +func (c *csiHook) Name() string { + return "csi_hook" +} + +func (c *csiHook) Prerun() error { + if !c.shouldRun() { + return nil + } + + // TODO: Volume attachment flow + return nil +} + +func newCSIHook(logger hclog.Logger, alloc *structs.Allocation) *csiHook { + return &csiHook{ + alloc: alloc, + logger: logger.Named("csi_hook"), + } +} + +func (h *csiHook) shouldRun() bool { + tg := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup) + for _, vol := range tg.Volumes { + if vol.Type == structs.VolumeTypeCSI { + return true + } + } + + return false +} diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 5169275e8..6c63c4c96 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -63,6 +63,15 @@ var _ interfaces.TaskStopHook = &csiPluginSupervisorHook{} func newCSIPluginSupervisorHook(csiRootDir string, eventEmitter ti.EventEmitter, runner *TaskRunner, logger hclog.Logger) *csiPluginSupervisorHook { task := runner.Task() + + // The Plugin directory will look something like this: + // . + // .. + // csi.sock - A unix domain socket used to communicate with the CSI Plugin + // staging/ + // {volume-id}/{usage-mode-hash}/ - Intermediary mount point that will be used by plugins that support NODE_STAGE_UNSTAGE capabilities. + // per-alloc/ + // {alloc-id}/{volume-id}/{usage-mode-hash}/ - Mount Point that will be bind-mounted into tasks that utilise the volume pluginRoot := filepath.Join(csiRootDir, string(task.CSIPluginConfig.Type), task.CSIPluginConfig.ID) shutdownCtx, cancelFn := context.WithCancel(context.Background()) @@ -93,7 +102,7 @@ func (*csiPluginSupervisorHook) Name() string { func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { // Create the mount directory that the container will access if it doesn't - // already exist. Default to only user access. + // already exist. Default to only nomad user access. 
if err := os.MkdirAll(h.mountPoint, 0700); err != nil && !os.IsExist(err) { return fmt.Errorf("failed to create mount point: %v", err) } @@ -248,6 +257,9 @@ func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), err ConnectionInfo: &dynamicplugins.PluginConnectionInfo{ SocketPath: socketPath, }, + Options: map[string]string{ + "MountPoint": h.mountPoint, + }, } } diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index 9982db96b..d33d740f3 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/nomad/client/devicemanager" "github.com/hashicorp/nomad/client/dynamicplugins" cinterfaces "github.com/hashicorp/nomad/client/interfaces" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/client/pluginmanager/drivermanager" cstate "github.com/hashicorp/nomad/client/state" cstructs "github.com/hashicorp/nomad/client/structs" @@ -187,6 +188,9 @@ type TaskRunner struct { // deviceStatsReporter is used to lookup resource usage for alloc devices deviceStatsReporter cinterfaces.DeviceStatsReporter + // csiManager is used to manage the mounting of CSI volumes into tasks + csiManager csimanager.Manager + // devicemanager is used to mount devices as well as lookup device // statistics devicemanager devicemanager.Manager @@ -246,6 +250,9 @@ type Config struct { // deviceStatsReporter is used to lookup resource usage for alloc devices DeviceStatsReporter cinterfaces.DeviceStatsReporter + // CSIManager is used to manage the mounting of CSI volumes into tasks + CSIManager csimanager.Manager + // DeviceManager is used to mount devices as well as lookup device // statistics DeviceManager devicemanager.Manager @@ -307,6 +314,7 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { shutdownCtxCancel: trCancel, triggerUpdateCh: make(chan struct{}, triggerUpdateChCap), waitCh: make(chan struct{}), + csiManager: config.CSIManager, devicemanager: config.DeviceManager, driverManager: config.DriverManager, maxEvents: defaultMaxEvents, diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 1e0935aea..15881df20 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -34,6 +34,8 @@ func validateHostVolumes(requestedByAlias map[string]*structs.VolumeRequest, cli var result error for _, req := range requestedByAlias { + // This is a defensive check, but this function should only ever receive + // host-type volumes. if req.Type != structs.VolumeTypeHost { continue } @@ -59,6 +61,12 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM return nil, fmt.Errorf("No group volume declaration found named: %s", m.Volume) } + // This is a defensive check, but this function should only ever receive + // host-type volumes. 
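	// CSI volumes requested by the same task group are split off in Prestart
	// and handled by prepareCSIVolumes below, so a non-host request reaching
	// this loop is skipped rather than treated as an error.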
+ if req.Type != structs.VolumeTypeHost { + continue + } + hostVolume, ok := clientVolumesByName[req.Source] if !ok { // Should never happen, but unless the client volumes were mutated during @@ -77,22 +85,55 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM return mounts, nil } -func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { - volumes := h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes - mounts := h.runner.hookResources.getMounts() +// partitionVolumesByType takes a map of volume-alias to volume-request and +// returns them in the form of volume-type:(volume-alias:volume-request) +func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map[string]*structs.VolumeRequest { + result := make(map[string]map[string]*structs.VolumeRequest) + for name, req := range xs { + txs, ok := result[req.Type] + if !ok { + txs = make(map[string]*structs.VolumeRequest) + result[req.Type] = txs + } + txs[name] = req + } + return result +} + +func (h *volumeHook) prepareHostVolumes(volumes map[string]*structs.VolumeRequest, req *interfaces.TaskPrestartRequest) ([]*drivers.MountConfig, error) { hostVolumes := h.runner.clientConfig.Node.HostVolumes // Always validate volumes to ensure that we do not allow volumes to be used // if a host is restarted and loses the host volume configuration. if err := validateHostVolumes(volumes, hostVolumes); err != nil { h.logger.Error("Requested Host Volume does not exist", "existing", hostVolumes, "requested", volumes) - return fmt.Errorf("host volume validation error: %v", err) + return nil, fmt.Errorf("host volume validation error: %v", err) } - requestedMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes) + hostVolumeMounts, err := h.hostVolumeMountConfigurations(req.Task.VolumeMounts, volumes, hostVolumes) + if err != nil { + h.logger.Error("Failed to generate host volume mounts", "error", err) + return nil, err + } + + return hostVolumeMounts, nil +} + +func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest) ([]*drivers.MountConfig, error) { + return nil, nil +} + +func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes) + + hostVolumeMounts, err := h.prepareHostVolumes(volumes[structs.VolumeTypeHost], req) + if err != nil { + return err + } + + csiVolumeMounts, err := h.prepareCSIVolumes(req) if err != nil { - h.logger.Error("Failed to generate volume mounts", "error", err) return err } @@ -100,17 +141,14 @@ func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartR // already exist. Although this loop is somewhat expensive, there are only // a small number of mounts that exist within most individual tasks. 
We may // want to revisit this using a `hookdata` param to be "mount only once" -REQUESTED: - for _, m := range requestedMounts { - for _, em := range mounts { - if em.IsEqual(m) { - continue REQUESTED - } - } - - mounts = append(mounts, m) + mounts := h.runner.hookResources.getMounts() + for _, m := range hostVolumeMounts { + mounts = ensureMountpointInserted(mounts, m) + } + for _, m := range csiVolumeMounts { + mounts = ensureMountpointInserted(mounts, m) } - h.runner.hookResources.setMounts(mounts) + return nil } diff --git a/client/client.go b/client/client.go index b67ae389e..fae2b2928 100644 --- a/client/client.go +++ b/client/client.go @@ -262,7 +262,7 @@ type Client struct { pluginManagers *pluginmanager.PluginGroup // csimanager is responsible for managing csi plugins. - csimanager pluginmanager.PluginManager + csimanager csimanager.Manager // devicemanger is responsible for managing device plugins. devicemanager devicemanager.Manager @@ -410,7 +410,7 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic } csiManager := csimanager.New(csiConfig) c.csimanager = csiManager - c.pluginManagers.RegisterAndRun(csiManager) + c.pluginManagers.RegisterAndRun(csiManager.PluginManager()) // Setup the driver manager driverConfig := &drivermanager.Config{ @@ -1084,6 +1084,7 @@ func (c *Client) restoreState() error { PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, DynamicRegistry: c.dynamicRegistry, + CSIManager: c.csimanager, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, ServersContactedCh: c.serversContactedCh, @@ -2347,6 +2348,7 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error PrevAllocWatcher: prevAllocWatcher, PrevAllocMigrator: prevAllocMigrator, DynamicRegistry: c.dynamicRegistry, + CSIManager: c.csimanager, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, } diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go index ea7bebc26..2bc4c4961 100644 --- a/client/dynamicplugins/registry.go +++ b/client/dynamicplugins/registry.go @@ -52,6 +52,10 @@ type PluginInfo struct { // ConnectionInfo should only be used externally during `RegisterPlugin` and // may not be exposed in the future. ConnectionInfo *PluginConnectionInfo + + // Options is used for plugin registrations to pass further metadata along to + // other subsystems + Options map[string]string } // PluginConnectionInfo is the data required to connect to the plugin. diff --git a/client/pluginmanager/csimanager/doc.go b/client/pluginmanager/csimanager/doc.go new file mode 100644 index 000000000..42400a092 --- /dev/null +++ b/client/pluginmanager/csimanager/doc.go @@ -0,0 +1,15 @@ +/** +csimanager manages locally running CSI Plugins on a Nomad host, and provides a +few different interfaces. 
+ +It provides: +- a pluginmanager.PluginManager implementation that is used to fingerprint and + heartbeat local node plugins +- (TODO) a csimanager.AttachmentWaiter implementation that can be used to wait for an + external CSIVolume to be attached to the node before returning +- (TODO) a csimanager.NodeController implementation that is used to manage the node-local + portions of the CSI specification, and encompassess volume staging/publishing +- (TODO) a csimanager.VolumeChecker implementation that can be used by hooks to ensure + their volumes are healthy(ish) +*/ +package csimanager diff --git a/client/pluginmanager/csimanager/fingerprint.go b/client/pluginmanager/csimanager/fingerprint.go new file mode 100644 index 000000000..d86e68299 --- /dev/null +++ b/client/pluginmanager/csimanager/fingerprint.go @@ -0,0 +1,157 @@ +package csimanager + +import ( + "context" + "fmt" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/dynamicplugins" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +type pluginFingerprinter struct { + logger hclog.Logger + client csi.CSIPlugin + info *dynamicplugins.PluginInfo + + // basicInfo holds a cache of data that should not change within a CSI plugin. + // This allows us to minimize the number of requests we make to plugins on each + // run of the fingerprinter, and reduces the chances of performing overly + // expensive actions repeatedly, and improves stability of data through + // transient failures. + basicInfo *structs.CSIInfo + + fingerprintNode bool + fingerprintController bool + + hadFirstSuccessfulFingerprint bool +} + +func (p *pluginFingerprinter) fingerprint(ctx context.Context) *structs.CSIInfo { + if p.basicInfo == nil { + info, err := p.buildBasicFingerprint(ctx) + if err != nil { + // If we receive a fingerprinting error, update the stats with as much + // info as possible and wait for the next fingerprint interval. + info.HealthDescription = fmt.Sprintf("failed initial fingerprint with err: %v", err) + info.Healthy = false + + return info + } + + // If fingerprinting succeeded, we don't need to repopulate the basic + // info again. 
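To make the split concrete: basicInfo caches only the facts that should not change for the life of the plugin, while health and the capability-derived flags are re-queried on every cycle by the build*Fingerprint helpers below. A successful node fingerprint ultimately reports something shaped roughly like this (all values hypothetical):

var exampleNodeFingerprint = &structs.CSIInfo{
	PluginID:                 "ebs0",
	Healthy:                  true,
	RequiresControllerPlugin: true,
	NodeInfo: &structs.CSINodeInfo{
		ID:                      "node-1",
		MaxVolumes:              25,
		RequiresNodeStageVolume: true,
	},
}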
+ p.basicInfo = info + } + + info := p.basicInfo.Copy() + var fp *structs.CSIInfo + var err error + + if p.fingerprintNode { + fp, err = p.buildNodeFingerprint(ctx, info) + } else if p.fingerprintController { + fp, err = p.buildControllerFingerprint(ctx, info) + } + + if err != nil { + info.Healthy = false + info.HealthDescription = fmt.Sprintf("failed fingerprinting with error: %v", err) + } else { + info = fp + p.hadFirstSuccessfulFingerprint = true + } + + return info +} + +func (p *pluginFingerprinter) buildBasicFingerprint(ctx context.Context) (*structs.CSIInfo, error) { + info := &structs.CSIInfo{ + PluginID: p.info.Name, + Healthy: false, + HealthDescription: "initial fingerprint not completed", + } + + if p.fingerprintNode { + info.NodeInfo = &structs.CSINodeInfo{} + } + if p.fingerprintController { + info.ControllerInfo = &structs.CSIControllerInfo{} + } + + capabilities, err := p.client.PluginGetCapabilities(ctx) + if err != nil { + return info, err + } + + info.RequiresControllerPlugin = capabilities.HasControllerService() + info.RequiresTopologies = capabilities.HasToplogies() + + if p.fingerprintNode { + nodeInfo, err := p.client.NodeGetInfo(ctx) + if err != nil { + return info, err + } + + info.NodeInfo.ID = nodeInfo.NodeID + info.NodeInfo.MaxVolumes = nodeInfo.MaxVolumes + info.NodeInfo.AccessibleTopology = structCSITopologyFromCSITopology(nodeInfo.AccessibleTopology) + } + + return info, nil +} + +func applyCapabilitySetToControllerInfo(cs *csi.ControllerCapabilitySet, info *structs.CSIControllerInfo) { + info.SupportsReadOnlyAttach = cs.HasPublishReadonly + info.SupportsAttachDetach = cs.HasPublishUnpublishVolume + info.SupportsListVolumes = cs.HasListVolumes + info.SupportsListVolumesAttachedNodes = cs.HasListVolumesPublishedNodes +} + +func (p *pluginFingerprinter) buildControllerFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := p.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + caps, err := p.client.ControllerGetCapabilities(ctx) + if err != nil { + return fp, err + } + applyCapabilitySetToControllerInfo(caps, fp.ControllerInfo) + + return fp, nil +} + +func (p *pluginFingerprinter) buildNodeFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { + fp := base.Copy() + + healthy, err := p.client.PluginProbe(ctx) + if err != nil { + return nil, err + } + fp.SetHealthy(healthy) + + caps, err := p.client.NodeGetCapabilities(ctx) + if err != nil { + return fp, err + } + fp.NodeInfo.RequiresNodeStageVolume = caps.HasStageUnstageVolume + + return fp, nil +} + +func structCSITopologyFromCSITopology(a *csi.Topology) *structs.CSITopology { + if a == nil { + return nil + } + + return &structs.CSITopology{ + Segments: helper.CopyMapStringString(a.Segments), + } +} diff --git a/client/pluginmanager/csimanager/fingerprint_test.go b/client/pluginmanager/csimanager/fingerprint_test.go new file mode 100644 index 000000000..4ce5951ea --- /dev/null +++ b/client/pluginmanager/csimanager/fingerprint_test.go @@ -0,0 +1,277 @@ +package csimanager + +import ( + "context" + "errors" + "testing" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" + "github.com/stretchr/testify/require" +) + +func TestBuildBasicFingerprint_Node(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.PluginCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + NodeInfo *csi.NodeGetInfoResponse + 
NodeInfoErr error + NodeInfoCallCount int64 + + ExpectedCSIInfo *structs.CSIInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: nil, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + }, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: csi.NewTestPluginCapabilitySet(true, false), + CapabilitiesCallCount: 1, + + NodeInfo: &csi.NodeGetInfoResponse{ + NodeID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &csi.Topology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + + RequiresTopologies: true, + + NodeInfo: &structs.CSINodeInfo{ + ID: "foobar", + MaxVolumes: 5, + AccessibleTopology: &structs.CSITopology{ + Segments: map[string]string{ + "com.hashicorp.nomad/node-id": "foobar", + }, + }, + }, + }, + }, + { + Name: "PluginGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + NodeInfoCallCount: 0, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: errors.New("request failed"), + }, + { + Name: "NodeGetInfo Failed", + + Capabilities: &csi.PluginCapabilitySet{}, + CapabilitiesCallCount: 1, + + NodeInfoErr: errors.New("request failed"), + NodeInfoCallCount: 1, + + ExpectedCSIInfo: &structs.CSIInfo{ + PluginID: "test-plugin", + Healthy: false, + HealthDescription: "initial fingerprint not completed", + NodeInfo: &structs.CSINodeInfo{}, + }, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextPluginGetCapabilitiesResponse = test.Capabilities + client.NextPluginGetCapabilitiesErr = test.CapabilitiesErr + + client.NextNodeGetInfoResponse = test.NodeInfo + client.NextNodeGetInfoErr = test.NodeInfoErr + + info, err := im.fp.buildBasicFingerprint(context.TODO()) + + require.Equal(t, test.ExpectedCSIInfo, info) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.PluginGetCapabilitiesCallCount) + require.Equal(t, test.NodeInfoCallCount, client.NodeGetInfoCallCount) + }) + } +} + +func TestBuildControllerFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.ControllerCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ProbeResponse bool + ProbeErr error + ProbeCallCount int64 + + ExpectedControllerInfo *structs.CSIControllerInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.ControllerCapabilitySet{}, + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + }, + { + Name: "Successful response with capabilities", + + Capabilities: &csi.ControllerCapabilitySet{ + HasListVolumes: true, + }, + CapabilitiesCallCount: 1, + + ProbeResponse: 
true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{ + SupportsListVolumes: true, + }, + }, + { + Name: "ControllerGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ProbeResponse: true, + ProbeCallCount: 1, + + ExpectedControllerInfo: &structs.CSIControllerInfo{}, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextControllerGetCapabilitiesResponse = test.Capabilities + client.NextControllerGetCapabilitiesErr = test.CapabilitiesErr + + client.NextPluginProbeResponse = test.ProbeResponse + client.NextPluginProbeErr = test.ProbeErr + + info, err := im.fp.buildControllerFingerprint(context.TODO(), &structs.CSIInfo{ControllerInfo: &structs.CSIControllerInfo{}}) + + require.Equal(t, test.ExpectedControllerInfo, info.ControllerInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.ControllerGetCapabilitiesCallCount) + require.Equal(t, test.ProbeCallCount, client.PluginProbeCallCount) + }) + } +} + +func TestBuildNodeFingerprint(t *testing.T) { + tt := []struct { + Name string + + Capabilities *csi.NodeCapabilitySet + CapabilitiesErr error + CapabilitiesCallCount int64 + + ExpectedCSINodeInfo *structs.CSINodeInfo + ExpectedErr error + }{ + { + Name: "Minimal successful response", + + Capabilities: &csi.NodeCapabilitySet{}, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: false, + }, + }, + { + Name: "Successful response with capabilities and topologies", + + Capabilities: &csi.NodeCapabilitySet{ + HasStageUnstageVolume: true, + }, + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{ + RequiresNodeStageVolume: true, + }, + }, + { + Name: "NodeGetCapabilities Failed", + + CapabilitiesErr: errors.New("request failed"), + CapabilitiesCallCount: 1, + + ExpectedCSINodeInfo: &structs.CSINodeInfo{}, + ExpectedErr: errors.New("request failed"), + }, + } + + for _, test := range tt { + t.Run(test.Name, func(t *testing.T) { + client, im := setupTestNodeInstanceManager(t) + + client.NextNodeGetCapabilitiesResponse = test.Capabilities + client.NextNodeGetCapabilitiesErr = test.CapabilitiesErr + + info, err := im.fp.buildNodeFingerprint(context.TODO(), &structs.CSIInfo{NodeInfo: &structs.CSINodeInfo{}}) + + require.Equal(t, test.ExpectedCSINodeInfo, info.NodeInfo) + require.Equal(t, test.ExpectedErr, err) + + require.Equal(t, test.CapabilitiesCallCount, client.NodeGetCapabilitiesCallCount) + }) + } +} diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 5240c9b8f..7ebf683f9 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -2,13 +2,11 @@ package csimanager import ( "context" - "fmt" + "sync" "time" "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/dynamicplugins" - "github.com/hashicorp/nomad/helper" - "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" ) @@ -26,22 +24,38 @@ type instanceManager struct { shutdownCtxCancelFn context.CancelFunc shutdownCh chan struct{} - fingerprintNode bool - fingerprintController bool + // mountPoint is the root of the mount dir where plugin specific data may be + // stored and where mount points will be created + mountPoint string + + fp *pluginFingerprinter + + 
volumeManager *volumeManager + volumeManagerMu sync.RWMutex + volumeManagerSetupCh chan struct{} + volumeManagerSetup bool client csi.CSIPlugin } func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *dynamicplugins.PluginInfo) *instanceManager { ctx, cancelFn := context.WithCancel(context.Background()) - + logger = logger.Named(p.Name) return &instanceManager{ - logger: logger.Named(p.Name), + logger: logger, info: p, updater: updater, - fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, - fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + fp: &pluginFingerprinter{ + logger: logger.Named("fingerprinter"), + info: p, + fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, + fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + }, + + mountPoint: p.Options["MountPoint"], + + volumeManagerSetupCh: make(chan struct{}), shutdownCtx: ctx, shutdownCtxCancelFn: cancelFn, @@ -50,7 +64,7 @@ func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *d } func (i *instanceManager) run() { - c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath, i.logger.Named("csi_client").With("plugin.name", i.info.Name, "plugin.type", i.info.Type)) + c, err := csi.NewClient(i.info.ConnectionInfo.SocketPath, i.logger) if err != nil { i.logger.Error("failed to setup instance manager client", "error", err) close(i.shutdownCh) @@ -61,18 +75,37 @@ func (i *instanceManager) run() { go i.runLoop() } +// VolumeMounter returns the volume manager that is configured for the given plugin +// instance. If called before the volume manager has been setup, it will block until +// the volume manager is ready or the context is closed. +func (i *instanceManager) VolumeMounter(ctx context.Context) (VolumeMounter, error) { + var vm VolumeMounter + i.volumeManagerMu.RLock() + if i.volumeManagerSetup { + vm = i.volumeManager + } + i.volumeManagerMu.RUnlock() + + if vm != nil { + return vm, nil + } + + select { + case <-i.volumeManagerSetupCh: + i.volumeManagerMu.RLock() + vm = i.volumeManager + i.volumeManagerMu.RUnlock() + return vm, nil + case <-ctx.Done(): + return nil, ctx.Err() + } +} + func (i *instanceManager) requestCtxWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { return context.WithTimeout(i.shutdownCtx, timeout) } func (i *instanceManager) runLoop() { - // basicInfo holds a cache of data that should not change within a CSI plugin. - // This allows us to minimize the number of requests we make to plugins on each - // run of the fingerprinter, and reduces the chances of performing overly - // expensive actions repeatedly, and improves stability of data through - // transient failures. - var basicInfo *structs.CSIInfo - timer := time.NewTimer(0) for { select { @@ -86,136 +119,30 @@ func (i *instanceManager) runLoop() { case <-timer.C: ctx, cancelFn := i.requestCtxWithTimeout(managerFingerprintInterval) - if basicInfo == nil { - info, err := i.buildBasicFingerprint(ctx) - if err != nil { - // If we receive a fingerprinting error, update the stats with as much - // info as possible and wait for the next fingerprint interval. - info.HealthDescription = fmt.Sprintf("failed initial fingerprint with err: %v", err) - cancelFn() - i.updater(i.info.Name, basicInfo) - timer.Reset(managerFingerprintInterval) - continue - } - - // If fingerprinting succeeded, we don't need to repopulate the basic - // info and we can stop here. 
- basicInfo = info - } - - info := basicInfo.Copy() - var fp *structs.CSIInfo - var err error - - if i.fingerprintNode { - fp, err = i.buildNodeFingerprint(ctx, info) - } else if i.fingerprintController { - fp, err = i.buildControllerFingerprint(ctx, info) - } - - if err != nil { - info.Healthy = false - info.HealthDescription = fmt.Sprintf("failed fingerprinting with error: %v", err) - } else { - info = fp - } - + info := i.fp.fingerprint(ctx) cancelFn() i.updater(i.info.Name, info) + + // TODO: refactor this lock into a faster, goroutine-local check + i.volumeManagerMu.RLock() + // When we've had a successful fingerprint, and the volume manager is not yet setup, + // and one is required (we're running a node plugin), then set one up now. + if i.fp.hadFirstSuccessfulFingerprint && !i.volumeManagerSetup && i.fp.fingerprintNode { + i.volumeManagerMu.RUnlock() + i.volumeManagerMu.Lock() + i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, info.NodeInfo.RequiresNodeStageVolume) + i.volumeManagerSetup = true + close(i.volumeManagerSetupCh) + i.volumeManagerMu.Unlock() + } else { + i.volumeManagerMu.RUnlock() + } + timer.Reset(managerFingerprintInterval) } } } -func applyCapabilitySetToControllerInfo(cs *csi.ControllerCapabilitySet, info *structs.CSIControllerInfo) { - info.SupportsReadOnlyAttach = cs.HasPublishReadonly - info.SupportsAttachDetach = cs.HasPublishUnpublishVolume - info.SupportsListVolumes = cs.HasListVolumes - info.SupportsListVolumesAttachedNodes = cs.HasListVolumesPublishedNodes -} - -func (i *instanceManager) buildControllerFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { - fp := base.Copy() - - healthy, err := i.client.PluginProbe(ctx) - if err != nil { - return nil, err - } - fp.SetHealthy(healthy) - - caps, err := i.client.ControllerGetCapabilities(ctx) - if err != nil { - return fp, err - } - applyCapabilitySetToControllerInfo(caps, fp.ControllerInfo) - - return fp, nil -} - -func (i *instanceManager) buildNodeFingerprint(ctx context.Context, base *structs.CSIInfo) (*structs.CSIInfo, error) { - fp := base.Copy() - - healthy, err := i.client.PluginProbe(ctx) - if err != nil { - return nil, err - } - fp.SetHealthy(healthy) - - caps, err := i.client.NodeGetCapabilities(ctx) - if err != nil { - return fp, err - } - fp.NodeInfo.RequiresNodeStageVolume = caps.HasStageUnstageVolume - - return fp, nil -} - -func structCSITopologyFromCSITopology(a *csi.Topology) *structs.CSITopology { - if a == nil { - return nil - } - - return &structs.CSITopology{ - Segments: helper.CopyMapStringString(a.Segments), - } -} - -func (i *instanceManager) buildBasicFingerprint(ctx context.Context) (*structs.CSIInfo, error) { - info := &structs.CSIInfo{ - PluginID: i.info.Name, - Healthy: false, - HealthDescription: "initial fingerprint not completed", - } - - if i.fingerprintNode { - info.NodeInfo = &structs.CSINodeInfo{} - } - if i.fingerprintController { - info.ControllerInfo = &structs.CSIControllerInfo{} - } - - capabilities, err := i.client.PluginGetCapabilities(ctx) - if err != nil { - return info, err - } - - info.RequiresControllerPlugin = capabilities.HasControllerService() - info.RequiresTopologies = capabilities.HasToplogies() - - if i.fingerprintNode { - nodeInfo, err := i.client.NodeGetInfo(ctx) - if err != nil { - return info, err - } - - info.NodeInfo.ID = nodeInfo.NodeID - info.NodeInfo.MaxVolumes = nodeInfo.MaxVolumes - info.NodeInfo.AccessibleTopology = structCSITopologyFromCSITopology(nodeInfo.AccessibleTopology) - } - - return 
info, nil -} - func (i *instanceManager) shutdown() { i.shutdownCtxCancelFn() <-i.shutdownCh diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go index eeed5b57f..c6b53043f 100644 --- a/client/pluginmanager/csimanager/instance_test.go +++ b/client/pluginmanager/csimanager/instance_test.go @@ -1,16 +1,11 @@ package csimanager import ( - "context" - "errors" "testing" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/csi/fake" - "github.com/stretchr/testify/require" ) func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) { @@ -22,275 +17,14 @@ func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) } return tp, &instanceManager{ - logger: logger, - info: pinfo, - client: tp, - fingerprintNode: true, - } -} - -func TestBuildBasicFingerprint_Node(t *testing.T) { - tt := []struct { - Name string - - Capabilities *csi.PluginCapabilitySet - CapabilitiesErr error - CapabilitiesCallCount int64 - - NodeInfo *csi.NodeGetInfoResponse - NodeInfoErr error - NodeInfoCallCount int64 - - ExpectedCSIInfo *structs.CSIInfo - ExpectedErr error - }{ - { - Name: "Minimal successful response", - - Capabilities: &csi.PluginCapabilitySet{}, - CapabilitiesCallCount: 1, - - NodeInfo: &csi.NodeGetInfoResponse{ - NodeID: "foobar", - MaxVolumes: 5, - AccessibleTopology: nil, - }, - NodeInfoCallCount: 1, - - ExpectedCSIInfo: &structs.CSIInfo{ - PluginID: "test-plugin", - Healthy: false, - HealthDescription: "initial fingerprint not completed", - NodeInfo: &structs.CSINodeInfo{ - ID: "foobar", - MaxVolumes: 5, - }, - }, - }, - { - Name: "Successful response with capabilities and topologies", - - Capabilities: csi.NewTestPluginCapabilitySet(true, false), - CapabilitiesCallCount: 1, - - NodeInfo: &csi.NodeGetInfoResponse{ - NodeID: "foobar", - MaxVolumes: 5, - AccessibleTopology: &csi.Topology{ - Segments: map[string]string{ - "com.hashicorp.nomad/node-id": "foobar", - }, - }, - }, - NodeInfoCallCount: 1, - - ExpectedCSIInfo: &structs.CSIInfo{ - PluginID: "test-plugin", - Healthy: false, - HealthDescription: "initial fingerprint not completed", - - RequiresTopologies: true, - - NodeInfo: &structs.CSINodeInfo{ - ID: "foobar", - MaxVolumes: 5, - AccessibleTopology: &structs.CSITopology{ - Segments: map[string]string{ - "com.hashicorp.nomad/node-id": "foobar", - }, - }, - }, - }, - }, - { - Name: "PluginGetCapabilities Failed", - - CapabilitiesErr: errors.New("request failed"), - CapabilitiesCallCount: 1, - - NodeInfoCallCount: 0, - - ExpectedCSIInfo: &structs.CSIInfo{ - PluginID: "test-plugin", - Healthy: false, - HealthDescription: "initial fingerprint not completed", - NodeInfo: &structs.CSINodeInfo{}, - }, - ExpectedErr: errors.New("request failed"), - }, - { - Name: "NodeGetInfo Failed", - - Capabilities: &csi.PluginCapabilitySet{}, - CapabilitiesCallCount: 1, - - NodeInfoErr: errors.New("request failed"), - NodeInfoCallCount: 1, - - ExpectedCSIInfo: &structs.CSIInfo{ - PluginID: "test-plugin", - Healthy: false, - HealthDescription: "initial fingerprint not completed", - NodeInfo: &structs.CSINodeInfo{}, - }, - ExpectedErr: errors.New("request failed"), - }, - } - - for _, test := range tt { - t.Run(test.Name, func(t *testing.T) { - client, im := setupTestNodeInstanceManager(t) - - client.NextPluginGetCapabilitiesResponse = test.Capabilities - 
client.NextPluginGetCapabilitiesErr = test.CapabilitiesErr - - client.NextNodeGetInfoResponse = test.NodeInfo - client.NextNodeGetInfoErr = test.NodeInfoErr - - info, err := im.buildBasicFingerprint(context.TODO()) - - require.Equal(t, test.ExpectedCSIInfo, info) - require.Equal(t, test.ExpectedErr, err) - - require.Equal(t, test.CapabilitiesCallCount, client.PluginGetCapabilitiesCallCount) - require.Equal(t, test.NodeInfoCallCount, client.NodeGetInfoCallCount) - }) - } -} - -func TestBuildControllerFingerprint(t *testing.T) { - tt := []struct { - Name string - - Capabilities *csi.ControllerCapabilitySet - CapabilitiesErr error - CapabilitiesCallCount int64 - - ProbeResponse bool - ProbeErr error - ProbeCallCount int64 - - ExpectedControllerInfo *structs.CSIControllerInfo - ExpectedErr error - }{ - { - Name: "Minimal successful response", - - Capabilities: &csi.ControllerCapabilitySet{}, - CapabilitiesCallCount: 1, - - ProbeResponse: true, - ProbeCallCount: 1, - - ExpectedControllerInfo: &structs.CSIControllerInfo{}, - }, - { - Name: "Successful response with capabilities", - - Capabilities: &csi.ControllerCapabilitySet{ - HasListVolumes: true, - }, - CapabilitiesCallCount: 1, - - ProbeResponse: true, - ProbeCallCount: 1, - - ExpectedControllerInfo: &structs.CSIControllerInfo{ - SupportsListVolumes: true, - }, - }, - { - Name: "ControllerGetCapabilities Failed", - - CapabilitiesErr: errors.New("request failed"), - CapabilitiesCallCount: 1, - - ProbeResponse: true, - ProbeCallCount: 1, - - ExpectedControllerInfo: &structs.CSIControllerInfo{}, - ExpectedErr: errors.New("request failed"), - }, - } - - for _, test := range tt { - t.Run(test.Name, func(t *testing.T) { - client, im := setupTestNodeInstanceManager(t) - - client.NextControllerGetCapabilitiesResponse = test.Capabilities - client.NextControllerGetCapabilitiesErr = test.CapabilitiesErr - - client.NextPluginProbeResponse = test.ProbeResponse - client.NextPluginProbeErr = test.ProbeErr - - info, err := im.buildControllerFingerprint(context.TODO(), &structs.CSIInfo{ControllerInfo: &structs.CSIControllerInfo{}}) - - require.Equal(t, test.ExpectedControllerInfo, info.ControllerInfo) - require.Equal(t, test.ExpectedErr, err) - - require.Equal(t, test.CapabilitiesCallCount, client.ControllerGetCapabilitiesCallCount) - require.Equal(t, test.ProbeCallCount, client.PluginProbeCallCount) - }) - } -} - -func TestBuildNodeFingerprint(t *testing.T) { - tt := []struct { - Name string - - Capabilities *csi.NodeCapabilitySet - CapabilitiesErr error - CapabilitiesCallCount int64 - - ExpectedCSINodeInfo *structs.CSINodeInfo - ExpectedErr error - }{ - { - Name: "Minimal successful response", - - Capabilities: &csi.NodeCapabilitySet{}, - CapabilitiesCallCount: 1, - - ExpectedCSINodeInfo: &structs.CSINodeInfo{ - RequiresNodeStageVolume: false, - }, - }, - { - Name: "Successful response with capabilities and topologies", - - Capabilities: &csi.NodeCapabilitySet{ - HasStageUnstageVolume: true, - }, - CapabilitiesCallCount: 1, - - ExpectedCSINodeInfo: &structs.CSINodeInfo{ - RequiresNodeStageVolume: true, - }, - }, - { - Name: "NodeGetCapabilities Failed", - - CapabilitiesErr: errors.New("request failed"), - CapabilitiesCallCount: 1, - - ExpectedCSINodeInfo: &structs.CSINodeInfo{}, - ExpectedErr: errors.New("request failed"), - }, - } - - for _, test := range tt { - t.Run(test.Name, func(t *testing.T) { - client, im := setupTestNodeInstanceManager(t) - - client.NextNodeGetCapabilitiesResponse = test.Capabilities - client.NextNodeGetCapabilitiesErr = 
test.CapabilitiesErr - - info, err := im.buildNodeFingerprint(context.TODO(), &structs.CSIInfo{NodeInfo: &structs.CSINodeInfo{}}) - - require.Equal(t, test.ExpectedCSINodeInfo, info.NodeInfo) - require.Equal(t, test.ExpectedErr, err) - - require.Equal(t, test.CapabilitiesCallCount, client.NodeGetCapabilitiesCallCount) - }) + logger: logger, + info: pinfo, + client: tp, + fp: &pluginFingerprinter{ + logger: logger.Named("fingerprinter"), + info: pinfo, + client: tp, + fingerprintNode: true, + }, } } diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go new file mode 100644 index 000000000..4944bb2a3 --- /dev/null +++ b/client/pluginmanager/csimanager/interface.go @@ -0,0 +1,34 @@ +package csimanager + +import ( + "context" + "errors" + + "github.com/hashicorp/nomad/client/pluginmanager" + "github.com/hashicorp/nomad/nomad/structs" +) + +var ( + DriverNotFoundErr = errors.New("Driver not found") +) + +type MountInfo struct { +} + +type VolumeMounter interface { + MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) + UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error +} + +type Manager interface { + // PluginManager returns a PluginManager for use by the node fingerprinter. + PluginManager() pluginmanager.PluginManager + + // MounterForVolume returns a VolumeMounter for the given requested volume. + // If there is no plugin registered for this volume type, a DriverNotFoundErr + // will be returned. + MounterForVolume(ctx context.Context, volume *structs.CSIVolume) (VolumeMounter, error) + + // Shutdown shuts down the Manager and unmounts any locally attached volumes. + Shutdown() +} diff --git a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go index ebcbcc89f..b32caed75 100644 --- a/client/pluginmanager/csimanager/manager.go +++ b/client/pluginmanager/csimanager/manager.go @@ -28,10 +28,9 @@ type Config struct { // New returns a new PluginManager that will handle managing CSI plugins from // the dynamicRegistry from the provided Config. -func New(config *Config) pluginmanager.PluginManager { +func New(config *Config) Manager { // Use a dedicated internal context for managing plugin shutdown. 
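From a caller's point of view (a sketch only; mgr, ctx, vol, and alloc are assumed to be in scope), the contract of the Manager interface above is that a missing node plugin surfaces as DriverNotFoundErr rather than a nil mounter:

	mounter, err := mgr.MounterForVolume(ctx, vol)
	if err == csimanager.DriverNotFoundErr {
		// No node plugin for this volume has been fingerprinted on this
		// client yet; callers should fail or retry rather than mount.
		return err
	}
	if err != nil {
		return err
	}
	_, err = mounter.MountVolume(ctx, vol, alloc)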
ctx, cancelFn := context.WithCancel(context.Background()) - if config.PluginResyncPeriod == 0 { config.PluginResyncPeriod = defaultPluginResyncPeriod } @@ -66,6 +65,24 @@ type csiManager struct { shutdownCh chan struct{} } +func (c *csiManager) PluginManager() pluginmanager.PluginManager { + return c +} + +func (c *csiManager) MounterForVolume(ctx context.Context, vol *structs.CSIVolume) (VolumeMounter, error) { + nodePlugins, hasAnyNodePlugins := c.instances["csi-node"] + if !hasAnyNodePlugins { + return nil, DriverNotFoundErr + } + + mgr, hasDriver := nodePlugins[vol.Driver] + if !hasDriver { + return nil, DriverNotFoundErr + } + + return mgr.VolumeMounter(ctx) +} + // Run starts a plugin manager and should return early func (c *csiManager) Run() { go c.runLoop() diff --git a/client/pluginmanager/csimanager/manager_test.go b/client/pluginmanager/csimanager/manager_test.go index 408168ca2..176dc6003 100644 --- a/client/pluginmanager/csimanager/manager_test.go +++ b/client/pluginmanager/csimanager/manager_test.go @@ -28,7 +28,7 @@ func setupRegistry() dynamicplugins.Registry { }) } -func TestCSIManager_Setup_Shutdown(t *testing.T) { +func TestManager_Setup_Shutdown(t *testing.T) { r := setupRegistry() defer r.Shutdown() @@ -42,7 +42,7 @@ func TestCSIManager_Setup_Shutdown(t *testing.T) { pm.Shutdown() } -func TestCSIManager_RegisterPlugin(t *testing.T) { +func TestManager_RegisterPlugin(t *testing.T) { registry := setupRegistry() defer registry.Shutdown() @@ -74,7 +74,7 @@ func TestCSIManager_RegisterPlugin(t *testing.T) { }, 5*time.Second, 10*time.Millisecond) } -func TestCSIManager_DeregisterPlugin(t *testing.T) { +func TestManager_DeregisterPlugin(t *testing.T) { registry := setupRegistry() defer registry.Shutdown() diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go new file mode 100644 index 000000000..158efd7b5 --- /dev/null +++ b/client/pluginmanager/csimanager/volume.go @@ -0,0 +1,64 @@ +package csimanager + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" +) + +var _ VolumeMounter = &volumeManager{} + +const ( + DefaultMountActionTimeout = 2 * time.Minute + StagingDirName = "staging" + AllocSpecificDirName = "per-alloc" +) + +// volumeManager handles the state of attached volumes for a given CSI Plugin. +// +// volumeManagers outlive the lifetime of a given allocation as volumes may be +// shared by multiple allocations on the same node. +// +// volumes are stored by an enriched volume usage struct as the CSI Spec requires +// slightly different usage based on the given usage model. 
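A minimal sketch of how the staging and per-alloc mount points line up with the directory scheme documented in the plugin supervisor hook (assumes path/filepath; the helper names and the usage-mode-hash argument are hypothetical, and MountVolume below is still unimplemented):

func stagingDirForVolume(mountRoot, volID, usageModeHash string) string {
	// e.g. {mountRoot}/staging/{volume-id}/{usage-mode-hash}
	return filepath.Join(mountRoot, StagingDirName, volID, usageModeHash)
}

func allocDirForVolume(mountRoot, allocID, volID, usageModeHash string) string {
	// e.g. {mountRoot}/per-alloc/{alloc-id}/{volume-id}/{usage-mode-hash}
	return filepath.Join(mountRoot, AllocSpecificDirName, allocID, volID, usageModeHash)
}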
+type volumeManager struct { + logger hclog.Logger + plugin csi.CSIPlugin + + volumes map[string]interface{} + // volumesMu sync.Mutex + + // mountRoot is the root of where plugin directories and mounts may be created + // e.g /opt/nomad.d/statedir/csi/my-csi-plugin/ + mountRoot string + + // requiresStaging shows whether the plugin requires that the volume manager + // calls NodeStageVolume and NodeUnstageVolume RPCs during setup and teardown + requiresStaging bool +} + +func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir string, requiresStaging bool) *volumeManager { + return &volumeManager{ + logger: logger.Named("volume_manager"), + plugin: plugin, + mountRoot: rootDir, + requiresStaging: requiresStaging, + volumes: make(map[string]interface{}), + } +} + +// MountVolume performs the steps required for using a given volume +// configuration for the provided allocation. +// +// TODO: Validate remote volume attachment and implement. +func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { + return nil, fmt.Errorf("Unimplemented") +} + +func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { + return fmt.Errorf("Unimplemented") +} From 88316208a0f5a880279dc883e57778761ac2e777 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 28 Jan 2020 10:28:34 -0500 Subject: [PATCH 046/126] csi: server-side plugin state tracking and api (#6966) * structs: CSIPlugin indexes jobs acting as plugins and node updates * schema: csi_plugins table for CSIPlugin * nomad: csi_endpoint use vol.Denormalize, plugin requests * nomad: csi_volume_endpoint: rename to csi_endpoint * agent: add CSI plugin endpoints * state_store_test: use generated ids to avoid t.Parallel conflicts * contributing: add note about registering new RPC structs * command: agent http register plugin lists * api: CSI plugin queries, ControllerHealthy -> ControllersHealthy * state_store: copy on write for volumes and plugins * structs: copy on write for volumes and plugins * state_store: CSIVolumeByID returns an unhealthy volume, denormalize * nomad: csi_endpoint use CSIVolumeDenormalizePlugins * structs: remove struct errors for missing objects * nomad: csi_endpoint return nil for missing objects, not errors * api: return meta from Register to avoid EOF error * state_store: CSIVolumeDenormalize keep allocs in their own maps * state_store: CSIVolumeDeregister error on missing volume * state_store: CSIVolumeRegister set indexes * nomad: csi_endpoint use CSIVolumeDenormalizePlugins tests --- api/csi.go | 133 +++++-- api/csi_test.go | 93 ++++- command/agent/csi_endpoint.go | 59 ++- command/agent/http.go | 2 + contributing/checklist-rpc-endpoint.md | 1 + ...csi_volume_endpoint.go => csi_endpoint.go} | 136 ++++++- ..._endpoint_test.go => csi_endpoint_test.go} | 113 +++++- nomad/server.go | 3 + nomad/state/schema.go | 28 +- nomad/state/state_store.go | 360 +++++++++++++++++- nomad/state/state_store_test.go | 151 +++++++- nomad/structs/csi.go | 343 ++++++++++++++--- nomad/structs/csi_test.go | 2 +- nomad/structs/errors.go | 2 - 14 files changed, 1252 insertions(+), 174 deletions(-) rename nomad/{csi_volume_endpoint.go => csi_endpoint.go} (63%) rename nomad/{csi_volume_endpoint_test.go => csi_endpoint_test.go} (69%) diff --git a/api/csi.go b/api/csi.go index 5168614ef..a3f4452fd 100644 --- a/api/csi.go +++ b/api/csi.go @@ -10,12 +10,12 @@ type CSIVolumes struct { client *Client } -// CSIVolumes 
returns a handle on the allocs endpoints. +// CSIVolumes returns a handle on the CSIVolumes endpoint func (c *Client) CSIVolumes() *CSIVolumes { return &CSIVolumes{client: c} } -// List returns all CSI volumes, ignoring driver +// List returns all CSI volumes func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, error) { var resp []*CSIVolumeListStub qm, err := v.client.query("/v1/csi/volumes", &resp, q) @@ -26,12 +26,12 @@ func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, er return resp, qm, nil } -// DriverList returns all CSI volumes for the specified driver -func (v *CSIVolumes) DriverList(driver string) ([]*CSIVolumeListStub, *QueryMeta, error) { - return v.List(&QueryOptions{Prefix: driver}) +// PluginList returns all CSI volumes for the specified plugin id +func (v *CSIVolumes) PluginList(pluginID string) ([]*CSIVolumeListStub, *QueryMeta, error) { + return v.List(&QueryOptions{Prefix: pluginID}) } -// Info is used to retrieve a single allocation. +// Info is used to retrieve a single CSIVolume func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, error) { var resp CSIVolume qm, err := v.client.query("/v1/csi/volume/"+id, &resp, q) @@ -41,13 +41,12 @@ func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, e return &resp, qm, nil } -func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) error { +func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, error) { req := CSIVolumeRegisterRequest{ Volumes: []*CSIVolume{vol}, } - var resp struct{} - _, err := v.client.write("/v1/csi/volume/"+vol.ID, req, &resp, w) - return err + meta, err := v.client.write("/v1/csi/volume/"+vol.ID, req, nil, w) + return meta, err } func (v *CSIVolumes) Deregister(id string, w *WriteOptions) error { @@ -81,7 +80,6 @@ const ( // CSIVolume is used for serialization, see also nomad/structs/csi.go type CSIVolume struct { ID string - Driver string Namespace string Topologies []*CSITopology AccessMode CSIVolumeAccessMode @@ -92,16 +90,17 @@ type CSIVolume struct { // Healthy is true iff all the denormalized plugin health fields are true, and the // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - NodeHealthy int - NodeExpected int - ResourceExhausted time.Time + Healthy bool + VolumeGC time.Time + PluginID string + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time - CreatedIndex uint64 - ModifiedIndex uint64 + CreateIndex uint64 + ModifyIndex uint64 } type CSIVolumeIndexSort []*CSIVolumeListStub @@ -111,7 +110,7 @@ func (v CSIVolumeIndexSort) Len() int { } func (v CSIVolumeIndexSort) Less(i, j int) bool { - return v[i].CreatedIndex > v[j].CreatedIndex + return v[i].CreateIndex > v[j].CreateIndex } func (v CSIVolumeIndexSort) Swap(i, j int) { @@ -121,7 +120,6 @@ func (v CSIVolumeIndexSort) Swap(i, j int) { // CSIVolumeListStub omits allocations. 
See also nomad/structs/csi.go type CSIVolumeListStub struct { ID string - Driver string Namespace string Topologies []*CSITopology AccessMode CSIVolumeAccessMode @@ -129,16 +127,17 @@ type CSIVolumeListStub struct { // Healthy is true iff all the denormalized plugin health fields are true, and the // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - NodeHealthy int - NodeExpected int - ResourceExhausted time.Time + Healthy bool + VolumeGC time.Time + PluginID string + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time - CreatedIndex uint64 - ModifiedIndex uint64 + CreateIndex uint64 + ModifyIndex uint64 } type CSIVolumeRegisterRequest struct { @@ -150,3 +149,75 @@ type CSIVolumeDeregisterRequest struct { VolumeIDs []string WriteRequest } + +// CSI Plugins are jobs with plugin specific data +type CSIPlugins struct { + client *Client +} + +type CSIPlugin struct { + ID string + Type CSIPluginType + Namespace string + Jobs map[string]map[string]*Job + + ControllersHealthy int + Controllers map[string]*CSIInfo + NodesHealthy int + Nodes map[string]*CSIInfo + + CreateIndex uint64 + ModifyIndex uint64 +} + +type CSIPluginListStub struct { + ID string + Type CSIPluginType + JobIDs map[string]map[string]struct{} + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 +} + +type CSIPluginIndexSort []*CSIPluginListStub + +func (v CSIPluginIndexSort) Len() int { + return len(v) +} + +func (v CSIPluginIndexSort) Less(i, j int) bool { + return v[i].CreateIndex > v[j].CreateIndex +} + +func (v CSIPluginIndexSort) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} + +// CSIPlugins returns a handle on the CSIPlugins endpoint +func (c *Client) CSIPlugins() *CSIPlugins { + return &CSIPlugins{client: c} +} + +// List returns all CSI plugins +func (v *CSIPlugins) List(q *QueryOptions) ([]*CSIPluginListStub, *QueryMeta, error) { + var resp []*CSIPluginListStub + qm, err := v.client.query("/v1/csi/plugins", &resp, q) + if err != nil { + return nil, nil, err + } + sort.Sort(CSIPluginIndexSort(resp)) + return resp, qm, nil +} + +// Info is used to retrieve a single CSI Plugin Job +func (v *CSIPlugins) Info(id string, q *QueryOptions) (*CSIPlugin, *QueryMeta, error) { + var resp *CSIPlugin + qm, err := v.client.query("/v1/csi/plugin/"+id, &resp, q) + if err != nil { + return nil, nil, err + } + return resp, qm, nil +} diff --git a/api/csi_test.go b/api/csi_test.go index 4e13ed4f9..3557bd0ed 100644 --- a/api/csi_test.go +++ b/api/csi_test.go @@ -3,7 +3,7 @@ package api import ( "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCSIVolumes_CRUD(t *testing.T) { @@ -14,9 +14,9 @@ func TestCSIVolumes_CRUD(t *testing.T) { // Successful empty result vols, qm, err := v.List(nil) - assert.NoError(t, err) - assert.NotEqual(t, 0, qm.LastIndex) - assert.Equal(t, 0, len(vols)) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 0, len(vols)) // Authorized QueryOpts. 
Use the root token to just bypass ACL details opts := &QueryOptions{ @@ -32,38 +32,89 @@ func TestCSIVolumes_CRUD(t *testing.T) { } // Register a volume - v.Register(&CSIVolume{ - ID: "DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", - Driver: "minnie", + id := "DEADBEEF-31B5-8F78-7986-DD404FDA0CD1" + _, err = v.Register(&CSIVolume{ + ID: id, Namespace: "default", + PluginID: "adam", AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: CSIVolumeAttachmentModeFilesystem, Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}}, }, wpts) + require.NoError(t, err) // Successful result with volumes vols, qm, err = v.List(opts) - assert.NoError(t, err) - assert.NotEqual(t, 0, qm.LastIndex) - assert.Equal(t, 1, len(vols)) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 1, len(vols)) // Successful info query - vol, qm, err := v.Info("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", opts) - assert.NoError(t, err) - assert.Equal(t, "minnie", vol.Driver) - assert.Equal(t, "bar", vol.Topologies[0].Segments["foo"]) + vol, qm, err := v.Info(id, opts) + require.NoError(t, err) + require.Equal(t, "bar", vol.Topologies[0].Segments["foo"]) // Deregister the volume - err = v.Deregister("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", wpts) - assert.NoError(t, err) + err = v.Deregister(id, wpts) + require.NoError(t, err) // Successful empty result vols, qm, err = v.List(nil) - assert.NoError(t, err) - assert.NotEqual(t, 0, qm.LastIndex) - assert.Equal(t, 0, len(vols)) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 0, len(vols)) // Failed info query - vol, qm, err = v.Info("DEADBEEF-63C7-407F-AE82-C99FBEF78FEB", opts) - assert.Error(t, err, "missing") + vol, qm, err = v.Info(id, opts) + require.Error(t, err, "missing") +} + +func TestCSIPlugins_viaJob(t *testing.T) { + t.Parallel() + c, s, root := makeACLClient(t, nil, nil) + defer s.Stop() + p := c.CSIPlugins() + + // Successful empty result + plugs, qm, err := p.List(nil) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 0, len(plugs)) + + // Authorized QueryOpts. 
Use the root token to just bypass ACL details + opts := &QueryOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + wpts := &WriteOptions{ + Region: "global", + Namespace: "default", + AuthToken: root.SecretID, + } + + // Register a plugin job + j := c.Jobs() + job := testJob() + job.Namespace = stringToPtr("default") + job.TaskGroups[0].Tasks[0].CSIPluginConfig = &TaskCSIPluginConfig{ + ID: "foo", + Type: "monolith", + MountDir: "/not-empty", + } + _, _, err = j.Register(job, wpts) + require.NoError(t, err) + + // Successful result with the plugin + plugs, qm, err = p.List(opts) + require.NoError(t, err) + require.NotEqual(t, 0, qm.LastIndex) + require.Equal(t, 1, len(plugs)) + + // Successful info query + plug, qm, err := p.Info("foo", opts) + require.NoError(t, err) + require.NotNil(t, plug.Jobs[*job.Namespace][*job.ID]) + require.Equal(t, *job.ID, *plug.Jobs[*job.Namespace][*job.ID].ID) } diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index 705157ec6..c6daad16f 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -29,10 +29,8 @@ func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Reque // CSIVolumeSpecificRequest dispatches GET and PUT func (s *HTTPServer) CSIVolumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + // Tokenize the suffix of the path to get the volume id reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/csi/volume/") - - // tokenize the suffix of the path to get the alloc id and find the action - // invoked on the alloc id tokens := strings.Split(reqSuffix, "/") if len(tokens) > 2 || len(tokens) < 1 { return nil, CodedError(404, resourceNotFoundErr) @@ -66,7 +64,7 @@ func (s *HTTPServer) csiVolumeGet(id string, resp http.ResponseWriter, req *http setMeta(resp, &out.QueryMeta) if out.Volume == nil { - return nil, CodedError(404, "alloc not found") + return nil, CodedError(404, "volume not found") } return out.Volume, nil @@ -116,3 +114,56 @@ func (s *HTTPServer) csiVolumeDelete(id string, resp http.ResponseWriter, req *h return nil, nil } + +// CSIPluginsRequest lists CSI plugins +func (s *HTTPServer) CSIPluginsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + args := structs.CSIPluginListRequest{} + + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIPluginListResponse + if err := s.agent.RPC("CSIPlugin.List", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + return out.Plugins, nil +} + +// CSIPluginSpecificRequest list the job with CSIInfo +func (s *HTTPServer) CSIPluginSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + if req.Method != "GET" { + return nil, CodedError(405, ErrInvalidMethod) + } + + // Tokenize the suffix of the path to get the plugin id + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/csi/plugin/") + tokens := strings.Split(reqSuffix, "/") + if len(tokens) > 2 || len(tokens) < 1 { + return nil, CodedError(404, resourceNotFoundErr) + } + id := tokens[0] + + args := structs.CSIPluginGetRequest{ID: id} + if s.parse(resp, req, &args.Region, &args.QueryOptions) { + return nil, nil + } + + var out structs.CSIPluginGetResponse + if err := s.agent.RPC("CSIPlugin.Get", &args, &out); err != nil { + return nil, err + } + + setMeta(resp, &out.QueryMeta) + if out.Plugin == nil { + 
return nil, CodedError(404, "plugin not found") + } + + return out.Plugin, nil +} diff --git a/command/agent/http.go b/command/agent/http.go index 57100eca1..dcd270213 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -255,6 +255,8 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/csi/volumes", s.wrap(s.CSIVolumesRequest)) s.mux.HandleFunc("/v1/csi/volume/", s.wrap(s.CSIVolumeSpecificRequest)) + s.mux.HandleFunc("/v1/csi/plugins", s.wrap(s.CSIPluginsRequest)) + s.mux.HandleFunc("/v1/csi/plugin/", s.wrap(s.CSIPluginSpecificRequest)) s.mux.HandleFunc("/v1/acl/policies", s.wrap(s.ACLPoliciesRequest)) s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest)) diff --git a/contributing/checklist-rpc-endpoint.md b/contributing/checklist-rpc-endpoint.md index 30b5d36e6..29ed912dc 100644 --- a/contributing/checklist-rpc-endpoint.md +++ b/contributing/checklist-rpc-endpoint.md @@ -15,6 +15,7 @@ Prefer adding a new message to changing any existing RPC messages. * RPCs are resolved by matching the method name for bound structs [net/rpc](https://golang.org/pkg/net/rpc/) * Check ACLs for security, list endpoints filter by ACL + * Register new RPC struct in `nomad/server.go` * Wrapper for the HTTP request in `command/agent/foo_endpoint.go` * Backwards compatibility requires a new endpoint, an upgraded client or server may be forwarding this request to an old server, diff --git a/nomad/csi_volume_endpoint.go b/nomad/csi_endpoint.go similarity index 63% rename from nomad/csi_volume_endpoint.go rename to nomad/csi_endpoint.go index e10b9a55e..c1d83266d 100644 --- a/nomad/csi_volume_endpoint.go +++ b/nomad/csi_endpoint.go @@ -61,10 +61,16 @@ func (srv *Server) WriteACLObj(args *structs.WriteRequest) (*acl.ACL, error) { return srv.QueryACLObj(opts) } -// replyCSIVolumeIndex sets the reply with the last index that modified the table csi_volumes -func (srv *Server) replySetCSIVolumeIndex(state *state.StateStore, reply *structs.QueryMeta) error { - // Use the last index that affected the table - index, err := state.Index("csi_volumes") +const ( + csiVolumeTable = "csi_volumes" + csiPluginTable = "csi_plugins" +) + +// replySetIndex sets the reply with the last index that modified the table +func (srv *Server) replySetIndex(table string, reply *structs.QueryMeta) error { + s := srv.fsm.State() + + index, err := s.Index(table) if err != nil { return err } @@ -98,8 +104,8 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV var err error var iter memdb.ResultIterator - if args.Driver != "" { - iter, err = state.CSIVolumesByDriver(ws, args.Driver) + if args.PluginID != "" { + iter, err = state.CSIVolumesByPluginID(ws, args.PluginID) } else { iter, err = state.CSIVolumes(ws) } @@ -117,7 +123,12 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV if raw == nil { break } + vol := raw.(*structs.CSIVolume) + vol, err := state.CSIVolumeDenormalizePlugins(ws, vol) + if err != nil { + return err + } // Filter on the request namespace to avoid ACL checks by volume if ns != "" && vol.Namespace != args.RequestNamespace() { @@ -136,7 +147,7 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV } } reply.Volumes = vs - return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) }} return v.srv.blockingRPC(&opts) } @@ -168,12 +179,15 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply 
*structs.CSIVol return err } - if vol == nil { - return structs.ErrMissingCSIVolumeID + if vol != nil { + vol, err = state.CSIVolumeDenormalize(ws, vol) + } + if err != nil { + return err } reply.Volume = vol - return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) }} return v.srv.blockingRPC(&opts) } @@ -215,7 +229,7 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return err } - return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) } // Deregister removes a set of volumes @@ -248,5 +262,103 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return err } - return v.srv.replySetCSIVolumeIndex(state, &reply.QueryMeta) + return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) +} + +// CSIPlugin wraps the structs.CSIPlugin with request data and server context +type CSIPlugin struct { + srv *Server + logger log.Logger +} + +// List replies with CSIPlugins, filtered by ACL access +func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIPluginListResponse) error { + if done, err := v.srv.forward("CSIPlugin.List", args, args, reply); done { + return err + } + + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + if err != nil { + return err + } + + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + return structs.ErrPermissionDenied + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "plugin", "list"}, metricsStart) + + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + // Query all plugins + iter, err := state.CSIPlugins(ws) + if err != nil { + return err + } + + // Collect results + var ps []*structs.CSIPluginListStub + for { + raw := iter.Next() + if raw == nil { + break + } + + plug := raw.(*structs.CSIPlugin) + + // FIXME we should filter the ACL access for the plugin's + // namespace, but plugins don't currently have namespaces + ps = append(ps, plug.Stub()) + } + + reply.Plugins = ps + return v.srv.replySetIndex(csiPluginTable, &reply.QueryMeta) + }} + return v.srv.blockingRPC(&opts) +} + +// Get fetches detailed information about a specific plugin +func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPluginGetResponse) error { + if done, err := v.srv.forward("CSIPlugin.Get", args, args, reply); done { + return err + } + + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + if err != nil { + return err + } + + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + return structs.ErrPermissionDenied + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "plugin", "get"}, metricsStart) + + opts := blockingOptions{ + queryOpts: &args.QueryOptions, + queryMeta: &reply.QueryMeta, + run: func(ws memdb.WatchSet, state *state.StateStore) error { + plug, err := state.CSIPluginByID(ws, args.ID) + if err != nil { + return err + } + + if plug != nil { + plug, err = state.CSIPluginDenormalize(ws, plug) + } + if err != nil { + return err + } + + // FIXME we should re-check the ACL access for the plugin's + // namespace, but plugins don't currently have namespaces + + reply.Plugin = plug + return v.srv.replySetIndex(csiPluginTable, &reply.QueryMeta) + }} + return v.srv.blockingRPC(&opts) } diff --git 
a/nomad/csi_volume_endpoint_test.go b/nomad/csi_endpoint_test.go similarity index 69% rename from nomad/csi_volume_endpoint_test.go rename to nomad/csi_endpoint_test.go index a66cc5477..bb1cac005 100644 --- a/nomad/csi_volume_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -38,7 +38,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Driver: "minnie", + PluginID: "minnie", }} err := state.CSIVolumeRegister(0, vols) require.NoError(t, err) @@ -84,7 +84,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { vols := []*structs.CSIVolume{{ ID: id0, Namespace: "notTheNamespace", - Driver: "minnie", + PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, Topologies: []*structs.CSITopology{{ @@ -124,7 +124,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { require.Equal(t, vols[0].ID, resp2.Volume.ID) // Registration does not update - req1.Volumes[0].Driver = "adam" + req1.Volumes[0].PluginID = "adam" err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) require.Error(t, err, "exists") @@ -143,7 +143,8 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { // Volume is missing err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) - require.Error(t, err, "missing") + require.NoError(t, err) + require.Nil(t, resp2.Volume) } func TestCSIVolumeEndpoint_List(t *testing.T) { @@ -175,19 +176,19 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Driver: "minnie", + PluginID: "minnie", }, { ID: id1, Namespace: ns, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Driver: "adam", + PluginID: "adam", }, { ID: id2, Namespace: ms, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Driver: "paddy", + PluginID: "paddy", }} err := state.CSIVolumeRegister(0, vols) require.NoError(t, err) @@ -211,9 +212,9 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { } require.Equal(t, 0, len(ids)) - // Query by Driver + // Query by PluginID req = &structs.CSIVolumeListRequest{ - Driver: "adam", + PluginID: "adam", QueryOptions: structs.QueryOptions{ Region: "global", Namespace: ns, @@ -225,9 +226,9 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { require.Equal(t, 1, len(resp.Volumes)) require.Equal(t, vols[1].ID, resp.Volumes[0].ID) - // Query by Driver, ACL filters all results + // Query by PluginID, ACL filters all results req = &structs.CSIVolumeListRequest{ - Driver: "paddy", + PluginID: "paddy", QueryOptions: structs.QueryOptions{ Region: "global", Namespace: ms, @@ -238,3 +239,93 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, len(resp.Volumes)) } + +func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + + job := mock.Job() + job.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ + ID: "foo", + Type: structs.CSIPluginTypeMonolith, + MountDir: "non-empty", + } + + state := srv.fsm.State() + 
state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + policy := mock.NamespacePolicy(ns, "", []string{ + acl.NamespaceCapabilityCSICreateVolume, + acl.NamespaceCapabilitySubmitJob, + }) + validToken := mock.CreatePolicyAndToken(t, state, 1001, acl.NamespaceCapabilityCSICreateVolume, policy) + + codec := rpcClient(t, srv) + + // Create the register request + req1 := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + resp1 := &structs.JobRegisterResponse{} + err := msgpackrpc.CallWithCodec(codec, "Job.Register", req1, resp1) + require.NoError(t, err) + require.NotEqual(t, 0, resp1.Index) + + // Get the plugin back out + policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) + + req2 := &structs.CSIPluginGetRequest{ + ID: "foo", + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp2 := &structs.CSIPluginGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.NoError(t, err) + require.NotEqual(t, 0, resp2.Index) + + // List plugins + req3 := &structs.CSIPluginListRequest{ + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp3 := &structs.CSIPluginListResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + + // Deregistration works + req4 := &structs.JobDeregisterRequest{ + JobID: job.ID, + Purge: true, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: validToken.SecretID, + }, + } + resp4 := &structs.JobDeregisterResponse{} + err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req4, resp4) + require.NoError(t, err) + + // Plugin is missing + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.NoError(t, err) + require.Nil(t, resp2.Plugin) +} diff --git a/nomad/server.go b/nomad/server.go index 9dda975bf..41ffe8cbe 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -250,6 +250,7 @@ type endpoints struct { Plan *Plan Alloc *Alloc CSIVolume *CSIVolume + CSIPlugin *CSIPlugin Deployment *Deployment Region *Region Search *Search @@ -1094,6 +1095,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.Job = NewJobEndpoints(s) s.staticEndpoints.Node = &Node{srv: s, logger: s.logger.Named("client")} // Add but don't register s.staticEndpoints.CSIVolume = &CSIVolume{srv: s, logger: s.logger.Named("csi_volume")} + s.staticEndpoints.CSIPlugin = &CSIPlugin{srv: s, logger: s.logger.Named("csi_plugin")} s.staticEndpoints.Deployment = &Deployment{srv: s, logger: s.logger.Named("deployment")} s.staticEndpoints.Operator = &Operator{srv: s, logger: s.logger.Named("operator")} s.staticEndpoints.Periodic = &Periodic{srv: s, logger: s.logger.Named("periodic")} @@ -1123,6 +1125,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { server.Register(s.staticEndpoints.Eval) server.Register(s.staticEndpoints.Job) server.Register(s.staticEndpoints.CSIVolume) + server.Register(s.staticEndpoints.CSIPlugin) server.Register(s.staticEndpoints.Deployment) server.Register(s.staticEndpoints.Operator) server.Register(s.staticEndpoints.Periodic) diff --git a/nomad/state/schema.go b/nomad/state/schema.go index 
3ea7321b8..8a7db3e6e 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -48,6 +48,7 @@ func init() { schedulerConfigTableSchema, clusterMetaTableSchema, csiVolumeTableSchema, + csiPluginTableSchema, }...) } @@ -679,13 +680,11 @@ func clusterMetaTableSchema() *memdb.TableSchema { } } +// CSIVolumes are identified by id globally, and searchable by driver func csiVolumeTableSchema() *memdb.TableSchema { return &memdb.TableSchema{ Name: "csi_volumes", Indexes: map[string]*memdb.IndexSchema{ - // Primary index is used for volume upsert - // and simple direct lookup. ID is required to be - // unique. "id": { Name: "id", AllowMissing: false, @@ -694,12 +693,29 @@ func csiVolumeTableSchema() *memdb.TableSchema { Field: "ID", }, }, - "driver": { - Name: "driver", + "plugin_id": { + Name: "plugin_id", AllowMissing: false, Unique: false, Indexer: &memdb.StringFieldIndex{ - Field: "Driver", + Field: "PluginID", + }, + }, + }, + } +} + +// CSIPlugins are identified by id globally, and searchable by driver +func csiPluginTableSchema() *memdb.TableSchema { + return &memdb.TableSchema{ + Name: "csi_plugins", + Indexes: map[string]*memdb.IndexSchema{ + "id": { + Name: "id", + AllowMissing: false, + Unique: true, + Indexer: &memdb.StringFieldIndex{ + Field: "ID", }, }, }, diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 74e1085ee..d8860de94 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -677,6 +677,9 @@ func (s *StateStore) UpsertNode(index uint64, node *structs.Node) error { if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil { return fmt.Errorf("index update failed: %v", err) } + if err := upsertNodeCSIPlugins(txn, node, index); err != nil { + return fmt.Errorf("csi plugin update failed: %v", err) + } txn.Commit() return nil @@ -704,6 +707,11 @@ func (s *StateStore) DeleteNode(index uint64, nodes []string) error { if err := txn.Delete("nodes", existing); err != nil { return fmt.Errorf("node delete failed: %s: %v", nodeID, err) } + + node := existing.(*structs.Node) + if err := deleteNodeCSIPlugins(txn, node, index); err != nil { + return fmt.Errorf("csi plugin delete failed: %v", err) + } } if err := txn.Insert("index", &IndexEntry{"nodes", index}); err != nil { @@ -931,6 +939,95 @@ func appendNodeEvents(index uint64, node *structs.Node, events []*structs.NodeEv } } +// upsertNodeCSIPlugins indexes csi plugins for volume retrieval, with health. 
It's called +// on upsertNodeEvents, so that event driven health changes are updated +func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) error { + if len(node.CSIControllerPlugins) == 0 && len(node.CSINodePlugins) == 0 { + return nil + } + + loop := func(info *structs.CSIInfo) error { + raw, err := txn.First("csi_plugins", "id", info.PluginID) + if err != nil { + return fmt.Errorf("csi_plugin lookup error: %s %v", info.PluginID, err) + } + + var plug *structs.CSIPlugin + if raw != nil { + plug = raw.(*structs.CSIPlugin).Copy(index) + } else { + plug = structs.NewCSIPlugin(info.PluginID, index) + } + + plug.AddPlugin(node.ID, info, index) + + err = txn.Insert("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins insert error: %v", err) + } + + return nil + } + + for _, info := range node.CSIControllerPlugins { + err := loop(info) + if err != nil { + return err + } + } + + for _, info := range node.CSINodePlugins { + err := loop(info) + if err != nil { + return err + } + } + + return nil +} + +// deleteNodeCSIPlugins cleans up CSIInfo node health status, called in DeleteNode +func deleteNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) error { + if len(node.CSIControllerPlugins) == 0 && len(node.CSINodePlugins) == 0 { + return nil + } + + names := map[string]struct{}{} + for _, info := range node.CSIControllerPlugins { + names[info.PluginID] = struct{}{} + } + for _, info := range node.CSINodePlugins { + names[info.PluginID] = struct{}{} + } + + for id := range names { + raw, err := txn.First("csi_plugins", "id", id) + if err != nil { + return fmt.Errorf("csi_plugins lookup error %s: %v", id, err) + } + if raw == nil { + return fmt.Errorf("csi_plugins missing plugin %s", id) + } + + plug := raw.(*structs.CSIPlugin).Copy(index) + plug.DeleteNode(node.ID, index) + + if plug.IsEmpty() { + err := txn.Delete("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins delete error: %v", err) + } + } + + err = txn.Insert("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins update error %s: %v", id, err) + } + } + + return nil +} + // NodeByID is used to lookup a node by ID func (s *StateStore) NodeByID(ws memdb.WatchSet, nodeID string) (*structs.Node, error) { txn := s.db.Txn(false) @@ -1068,6 +1165,10 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b return fmt.Errorf("unable to upsert job into job_version table: %v", err) } + if err := s.upsertJobCSIPlugins(index, job, txn); err != nil { + return fmt.Errorf("unable to upsert csi_plugins table: %v", err) + } + // Insert the job if err := txn.Insert("jobs", job); err != nil { return fmt.Errorf("job insert failed: %v", err) @@ -1162,6 +1263,11 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn return err } + // Delete the csi_plugins + if err := s.deleteJobCSIPlugins(index, job, txn); err != nil { + return err + } + // Delete the job summary if _, err = txn.DeleteAll("job_summary", "id", namespace, jobID); err != nil { return fmt.Errorf("deleing job summary failed: %v", err) @@ -1511,21 +1617,26 @@ func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) return iter, nil } -// CSIVolumeRegister adds a volume to the server store, iff it's not new +// CSIVolumeRegister adds a volume to the server store, failing if it already exists func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolume) error { txn := s.db.Txn(true) defer txn.Abort() for _, v := 
range volumes { // Check for volume existence - _, obj, err := txn.FirstWatch("csi_volumes", "id", v.ID) + obj, err := txn.First("csi_volumes", "id", v.ID) if err != nil { - return fmt.Errorf("volume existence check: %v", err) + return fmt.Errorf("volume existence check error: %v", err) } if obj != nil { return fmt.Errorf("volume exists: %s", v.ID) } + if v.CreateIndex == 0 { + v.CreateIndex = index + v.ModifyIndex = index + } + err = txn.Insert("csi_volumes", v) if err != nil { return fmt.Errorf("volume insert: %v", err) @@ -1546,19 +1657,22 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVo } ws.Add(watchCh) - if obj != nil { - v := obj.(*structs.CSIVolume) - return v, nil + if obj == nil { + return nil, nil } - return nil, nil + vol := obj.(*structs.CSIVolume) + // Health data is stale, so set this volume unhealthy until it's denormalized + vol.Healthy = false + + return vol, nil } -// CSIVolumes looks up the entire csi_volumes table -func (s *StateStore) CSIVolumesByDriver(ws memdb.WatchSet, driver string) (memdb.ResultIterator, error) { +// CSIVolumes looks up csi_volumes by pluginID +func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, pluginID string) (memdb.ResultIterator, error) { txn := s.db.Txn(false) - iter, err := txn.Get("csi_volumes", "driver", driver) + iter, err := txn.Get("csi_volumes", "plugin_id", pluginID) if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } @@ -1593,11 +1707,13 @@ func (s *StateStore) CSIVolumeClaim(index uint64, id string, alloc *structs.Allo return fmt.Errorf("volume not found: %s", id) } - volume, ok := row.(*structs.CSIVolume) + orig, ok := row.(*structs.CSIVolume) if !ok { return fmt.Errorf("volume row conversion error") } + volume := orig.Copy(index) + if !volume.Claim(claim, alloc) { return fmt.Errorf("volume max claim reached") } @@ -1634,6 +1750,222 @@ func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { return nil } +// upsertJobCSIPlugins is called on UpsertJob and maintains the csi_plugin index of jobs +func (s *StateStore) upsertJobCSIPlugins(index uint64, job *structs.Job, txn *memdb.Txn) error { + ws := memdb.NewWatchSet() + plugs, err := s.csiPluginsByJob(ws, job, index) + if err != nil { + return fmt.Errorf("%v", err) + } + + // Append this job to all of them + for _, plug := range plugs { + if plug.CreateIndex != index { + plug = plug.Copy(index) + } + + plug.AddJob(job) + plug.ModifyIndex = index + err := txn.Insert("csi_plugins", plug) + if err != nil { + return err + } + } + + return nil +} + +// csiPluginsByJob finds or creates CSIPlugins identified by the configuration contained in job +func (s *StateStore) csiPluginsByJob(ws memdb.WatchSet, job *structs.Job, index uint64) (map[string]*structs.CSIPlugin, error) { + txn := s.db.Txn(false) + defer txn.Abort() + + plugs := map[string]*structs.CSIPlugin{} + + for _, tg := range job.TaskGroups { + for _, t := range tg.Tasks { + if t.CSIPluginConfig == nil { + continue + } + + plug, ok := plugs[t.CSIPluginConfig.ID] + if ok { + continue + } + + plug, err := s.CSIPluginByID(ws, t.CSIPluginConfig.ID) + if err != nil { + return nil, err + } + + if plug == nil { + plug = structs.NewCSIPlugin(t.CSIPluginConfig.ID, index) + plug.Type = t.CSIPluginConfig.Type + } + + plugs[t.CSIPluginConfig.ID] = plug + } + } + + return plugs, nil +} + +// deleteJobCSIPlugins is called on DeleteJob +func (s *StateStore) deleteJobCSIPlugins(index uint64, job *structs.Job, txn *memdb.Txn) error { + ws := memdb.NewWatchSet() + 
plugs, err := s.csiPluginsByJob(ws, job, index) + if err != nil { + return fmt.Errorf("%v", err) + } + + // Remove this job from each plugin. If the plugin has no jobs left, remove it + for _, plug := range plugs { + if plug.CreateIndex != index { + plug = plug.Copy(index) + } + + plug.DeleteJob(job) + if plug.IsEmpty() { + err = txn.Delete("csi_plugins", plug) + } else { + plug.ModifyIndex = index + err = txn.Insert("csi_plugins", plug) + } + if err != nil { + return fmt.Errorf("csi_plugins update: %v", err) + } + } + + return nil +} + +// CSIVolumeDenormalize takes a CSIVolume and denormalizes for the API +func (s *StateStore) CSIVolumeDenormalize(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { + if vol == nil { + return nil, nil + } + + vol, err := s.CSIVolumeDenormalizePlugins(ws, vol) + if err != nil { + return nil, err + } + + return s.csiVolumeDenormalizeAllocs(ws, vol) +} + +// CSIVolumeDenormalize returns a CSIVolume with current health and plugins +func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { + if vol == nil { + return nil, nil + } + + // Lookup CSIPlugin, the health records, and calculate volume health + txn := s.db.Txn(false) + defer txn.Abort() + + plug, err := s.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return nil, fmt.Errorf("plugin lookup error: %s %v", vol.PluginID, err) + } + if plug == nil { + vol.ControllersHealthy = 0 + vol.NodesHealthy = 0 + vol.Healthy = false + return vol, nil + } + + vol.ControllersHealthy = plug.ControllersHealthy + vol.NodesHealthy = plug.NodesHealthy + // This number is incorrect! The expected number of node plugins is actually this + + // the number of blocked evaluations for the jobs controlling these plugins + vol.ControllersExpected = len(plug.Controllers) + vol.NodesExpected = len(plug.Nodes) + + vol.Healthy = vol.ControllersHealthy > 0 && vol.NodesHealthy > 0 + + return vol, nil +} + +// csiVolumeDenormalizeAllocs returns a CSIVolume with allocations +func (s *StateStore) csiVolumeDenormalizeAllocs(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { + for id := range vol.ReadAllocs { + a, err := s.AllocByID(ws, id) + if err != nil { + return nil, err + } + vol.ReadAllocs[id] = a + } + + for id := range vol.WriteAllocs { + a, err := s.AllocByID(ws, id) + if err != nil { + return nil, err + } + vol.WriteAllocs[id] = a + } + + for id := range vol.PastAllocs { + a, err := s.AllocByID(ws, id) + if err != nil { + return nil, err + } + vol.PastAllocs[id] = a + } + + return vol, nil +} + +// CSIPlugins returns the unfiltered list of all plugin health status +func (s *StateStore) CSIPlugins(ws memdb.WatchSet) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + defer txn.Abort() + + iter, err := txn.Get("csi_plugins", "id") + if err != nil { + return nil, fmt.Errorf("csi_plugins lookup failed: %v", err) + } + + return iter, nil +} + +// CSIPluginByID returns the one named CSIPlugin +func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPlugin, error) { + txn := s.db.Txn(false) + defer txn.Abort() + + raw, err := txn.First("csi_plugins", "id", id) + if err != nil { + return nil, fmt.Errorf("csi_plugin lookup failed: %s %v", id, err) + } + + if raw == nil { + return nil, nil + } + + plug := raw.(*structs.CSIPlugin) + + return plug, nil +} + +// CSIPluginDenormalize returns a CSIPlugin with jobs +func (s *StateStore) CSIPluginDenormalize(ws memdb.WatchSet, plug *structs.CSIPlugin) 
(*structs.CSIPlugin, error) { + if plug == nil { + return nil, nil + } + + for ns, js := range plug.Jobs { + for id := range js { + j, err := s.JobByID(ws, ns, id) + if err != nil { + return nil, err + } + plug.Jobs[ns][id] = j + } + } + + return plug, nil +} + // UpsertPeriodicLaunch is used to register a launch or update it. func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.PeriodicLaunch) error { txn := s.db.Txn(true) @@ -2448,8 +2780,8 @@ func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, termin return out, nil } -// AllocsByJob returns all the allocations by job id -func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Allocation, error) { +// AllocsByJob returns allocations by job id +func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, anyCreateIndex bool) ([]*structs.Allocation, error) { txn := s.db.Txn(false) // Get the job @@ -2481,7 +2813,7 @@ func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all // If the allocation belongs to a job with the same ID but a different // create index and we are not getting all the allocations whose Jobs // matches the same Job ID then we skip it - if !all && job != nil && alloc.Job.CreateIndex != job.CreateIndex { + if !anyCreateIndex && job != nil && alloc.Job.CreateIndex != job.CreateIndex { continue } out = append(out, raw.(*structs.Allocation)) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index b5666927e..df24da193 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2830,18 +2830,20 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { func TestStateStore_CSIVolume(t *testing.T) { state := testStateStore(t) - v0 := structs.CreateCSIVolume("foo") - v0.ID = "DEADBEEF-70AD-4672-9178-802BCA500C87" + id0, id1 := uuid.Generate(), uuid.Generate() + + v0 := structs.NewCSIVolume("foo") + v0.ID = id0 v0.Namespace = "default" - v0.Driver = "minnie" + v0.PluginID = "minnie" v0.Healthy = true v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - v1 := structs.CreateCSIVolume("foo") - v1.ID = "BAADF00D-70AD-4672-9178-802BCA500C87" + v1 := structs.NewCSIVolume("foo") + v1.ID = id1 v1.Namespace = "default" - v1.Driver = "adam" + v1.PluginID = "adam" v1.Healthy = true v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem @@ -2869,18 +2871,18 @@ func TestStateStore_CSIVolume(t *testing.T) { require.Equal(t, 2, len(vs)) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByDriver(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, "minnie") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 1, len(vs)) err = state.CSIVolumeDeregister(1, []string{ - "BAADF00D-70AD-4672-9178-802BCA500C87", + id1, }) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByDriver(ws, "adam") + iter, err = state.CSIVolumesByPluginID(ws, "adam") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 0, len(vs)) @@ -2898,26 +2900,147 @@ func TestStateStore_CSIVolume(t *testing.T) { w := structs.CSIVolumeClaimWrite u := structs.CSIVolumeClaimRelease - err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, r) + err = state.CSIVolumeClaim(2, id0, a0, r) require.NoError(t, err) - err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a1, w) + err = 
state.CSIVolumeClaim(2, id0, a1, w) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByDriver(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, "minnie") require.NoError(t, err) vs = slurp(iter) require.False(t, vs[0].CanWrite()) - err = state.CSIVolumeClaim(2, "DEADBEEF-70AD-4672-9178-802BCA500C87", a0, u) + err = state.CSIVolumeClaim(2, id0, a0, u) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByDriver(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, "minnie") require.NoError(t, err) vs = slurp(iter) require.True(t, vs[0].CanReadOnly()) } +// TestStateStore_CSIPluginJobs creates plugin jobs and tests that they create a CSIPlugin +func TestStateStore_CSIPluginJobs(t *testing.T) { + index := uint64(999) + state := testStateStore(t) + testStateStore_CSIPluginJobs(t, index, state) +} + +func testStateStore_CSIPluginJobs(t *testing.T, index uint64, state *StateStore) (uint64, *StateStore) { + j0 := mock.Job() + j0.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ + ID: "foo", + Type: structs.CSIPluginTypeController, + } + + j1 := mock.Job() + j1.Type = structs.JobTypeSystem + j1.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ + ID: "foo", + Type: structs.CSIPluginTypeNode, + } + + index++ + err := state.UpsertJob(index, j0) + require.NoError(t, err) + + index++ + err = state.UpsertJob(index, j1) + require.NoError(t, err) + + // Get the plugin back out by id + ws := memdb.NewWatchSet() + plug, err := state.CSIPluginByID(ws, "foo") + require.NoError(t, err) + + require.Equal(t, "foo", plug.ID) + + jids := map[string]struct{}{j0.ID: struct{}{}, j1.ID: struct{}{}} + for jid := range plug.Jobs[structs.DefaultNamespace] { + delete(jids, jid) + } + require.Equal(t, 0, len(jids)) + + return index, state +} + +// TestStateStore_CSIPluginNodes uses the state from jobs, and uses node fingerprinting to update health +func TestStateStore_CSIPluginNodes(t *testing.T) { + index := uint64(999) + state := testStateStore(t) + index, state = testStateStore_CSIPluginJobs(t, index, state) + testStateStore_CSIPluginNodes(t, index, state) +} + +func testStateStore_CSIPluginNodes(t *testing.T, index uint64, state *StateStore) (uint64, *StateStore) { + // Create Nodes fingerprinting the plugins + ns := []*structs.Node{mock.Node(), mock.Node(), mock.Node()} + + for _, n := range ns { + index++ + err := state.UpsertNode(index, n) + require.NoError(t, err) + } + + // Fingerprint a running controller plugin + n0 := ns[0].Copy() + n0.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + UpdateTime: time.Now(), + RequiresControllerPlugin: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsListVolumes: true, + }, + }, + } + + index++ + err := state.UpsertNode(index, n0) + require.NoError(t, err) + + // Fingerprint two running node plugins + for _, n := range ns[1:] { + n = n.Copy() + n.CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + UpdateTime: time.Now(), + RequiresControllerPlugin: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + + index++ + err = state.UpsertNode(index, n) + require.NoError(t, err) + } + + ws := memdb.NewWatchSet() + plug, err := state.CSIPluginByID(ws, "foo") + require.NoError(t, err) + + require.Equal(t, "foo", plug.ID) + require.Equal(t, 1, plug.ControllersHealthy) + 
require.Equal(t, 2, plug.NodesHealthy) + + return index, state +} + +// TestStateStore_CSIPluginBackwards gets the node state first, and the job state second +func TestStateStore_CSIPluginBackwards(t *testing.T) { + index := uint64(999) + state := testStateStore(t) + index, state = testStateStore_CSIPluginNodes(t, index, state) + testStateStore_CSIPluginJobs(t, index, state) +} + func TestStateStore_Indexes(t *testing.T) { t.Parallel() diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index d511e9e75..bcb0a8845 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -135,9 +135,9 @@ func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { } } +// CSIVolume is the full representation of a CSI Volume type CSIVolume struct { ID string - Driver string Namespace string Topologies []*CSITopology AccessMode CSIVolumeAccessMode @@ -150,66 +150,76 @@ type CSIVolume struct { // Healthy is true if all the denormalized plugin health fields are true, and the // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - Controller []*Job - NodeHealthy int - NodeExpected int - ResourceExhausted time.Time + Healthy bool + VolumeGC time.Time + PluginID string + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + ResourceExhausted time.Time - CreatedIndex uint64 - ModifiedIndex uint64 + CreateIndex uint64 + ModifyIndex uint64 } +// CSIVolListStub is partial representation of a CSI Volume for inclusion in lists type CSIVolListStub struct { - ID string - Driver string - Namespace string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode - CurrentReaders int - CurrentWriters int - Healthy bool - VolumeGC time.Time - ControllerName string - ControllerHealthy bool - NodeHealthy int - NodeExpected int - CreatedIndex uint64 - ModifiedIndex uint64 + ID string + Namespace string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + CurrentReaders int + CurrentWriters int + Healthy bool + VolumeGC time.Time + PluginID string + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 } -func CreateCSIVolume(controllerName string) *CSIVolume { - return &CSIVolume{ - ControllerName: controllerName, - ReadAllocs: map[string]*Allocation{}, - WriteAllocs: map[string]*Allocation{}, - PastAllocs: map[string]*Allocation{}, - Topologies: []*CSITopology{}, +// NewCSIVolume creates the volume struct. 
No side-effects +func NewCSIVolume(pluginID string) *CSIVolume { + out := &CSIVolume{ + ID: pluginID, } + + out.newStructs() + return out +} + +func (v *CSIVolume) newStructs() { + if v.Topologies == nil { + v.Topologies = []*CSITopology{} + } + + v.ReadAllocs = map[string]*Allocation{} + v.WriteAllocs = map[string]*Allocation{} + v.PastAllocs = map[string]*Allocation{} } func (v *CSIVolume) Stub() *CSIVolListStub { stub := CSIVolListStub{ - ID: v.ID, - Driver: v.Driver, - Namespace: v.Namespace, - Topologies: v.Topologies, - AccessMode: v.AccessMode, - AttachmentMode: v.AttachmentMode, - CurrentReaders: len(v.ReadAllocs), - CurrentWriters: len(v.WriteAllocs), - Healthy: v.Healthy, - VolumeGC: v.VolumeGC, - ControllerName: v.ControllerName, - ControllerHealthy: v.ControllerHealthy, - NodeHealthy: v.NodeHealthy, - NodeExpected: v.NodeExpected, - CreatedIndex: v.CreatedIndex, - ModifiedIndex: v.ModifiedIndex, + ID: v.ID, + Namespace: v.Namespace, + Topologies: v.Topologies, + AccessMode: v.AccessMode, + AttachmentMode: v.AttachmentMode, + CurrentReaders: len(v.ReadAllocs), + CurrentWriters: len(v.WriteAllocs), + Healthy: v.Healthy, + VolumeGC: v.VolumeGC, + PluginID: v.PluginID, + ControllersHealthy: v.ControllersHealthy, + NodesHealthy: v.NodesHealthy, + NodesExpected: v.NodesExpected, + CreateIndex: v.CreateIndex, + ModifyIndex: v.ModifyIndex, } return &stub @@ -238,6 +248,29 @@ func (v *CSIVolume) CanWrite() bool { } } +// Copy returns a copy of the volume, which shares only the Topologies slice +func (v *CSIVolume) Copy(index uint64) *CSIVolume { + copy := *v + out := © + out.newStructs() + out.ModifyIndex = index + + for k, v := range v.ReadAllocs { + out.ReadAllocs[k] = v + } + + for k, v := range v.WriteAllocs { + out.WriteAllocs[k] = v + } + + for k, v := range v.PastAllocs { + out.PastAllocs[k] = v + } + + return out +} + +// Claim updates the allocations and changes the volume state func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { switch claim { case CSIVolumeClaimRead: @@ -250,30 +283,39 @@ func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { return false } +// ClaimRead marks an allocation as using a volume read-only func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { if !v.CanReadOnly() { return false } - v.ReadAllocs[alloc.ID] = alloc + // Allocations are copy on write, so we want to keep the id but don't need the + // pointer. We'll get it from the db in denormalize. + v.ReadAllocs[alloc.ID] = nil delete(v.WriteAllocs, alloc.ID) delete(v.PastAllocs, alloc.ID) return true } +// ClaimWrite marks an allocation as using a volume as a writer func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { if !v.CanWrite() { return false } - v.WriteAllocs[alloc.ID] = alloc + // Allocations are copy on write, so we want to keep the id but don't need the + // pointer. We'll get it from the db in denormalize. + v.WriteAllocs[alloc.ID] = nil delete(v.ReadAllocs, alloc.ID) delete(v.PastAllocs, alloc.ID) return true } +// ClaimRelease is called when the allocation has terminated and already stopped using the volume func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool { delete(v.ReadAllocs, alloc.ID) delete(v.WriteAllocs, alloc.ID) - v.PastAllocs[alloc.ID] = alloc + // Allocations are copy on write, so we want to keep the id but don't need the + // pointer. We'll get it from the db in denormalize. 
+ v.PastAllocs[alloc.ID] = nil return true } @@ -292,11 +334,10 @@ func (v *CSIVolume) Equal(o *CSIVolume) bool { // Omit the plugin health fields, their values are controlled by plugin jobs if v.ID == o.ID && - v.Driver == o.Driver && v.Namespace == o.Namespace && v.AccessMode == o.AccessMode && v.AttachmentMode == o.AttachmentMode && - v.ControllerName == o.ControllerName { + v.PluginID == o.PluginID { // Setwise equality of topologies var ok bool for _, t := range v.Topologies { @@ -323,8 +364,8 @@ func (v *CSIVolume) Validate() error { if v.ID == "" { errs = append(errs, "missing volume id") } - if v.Driver == "" { - errs = append(errs, "missing driver") + if v.PluginID == "" { + errs = append(errs, "missing plugin id") } if v.Namespace == "" { errs = append(errs, "missing namespace") @@ -388,7 +429,7 @@ type CSIVolumeClaimRequest struct { } type CSIVolumeListRequest struct { - Driver string + PluginID string QueryOptions } @@ -406,3 +447,189 @@ type CSIVolumeGetResponse struct { Volume *CSIVolume QueryMeta } + +// CSIPlugin bundles job and info context for the plugin for clients +type CSIPlugin struct { + ID string + Type CSIPluginType + + // Jobs is updated by UpsertJob, and keeps an index of jobs containing node or + // controller tasks for this plugin. It is addressed by [job.Namespace][job.ID] + Jobs map[string]map[string]*Job + + ControllersHealthy int + Controllers map[string]*CSIInfo + NodesHealthy int + Nodes map[string]*CSIInfo + + CreateIndex uint64 + ModifyIndex uint64 +} + +// NewCSIPlugin creates the plugin struct. No side-effects +func NewCSIPlugin(id string, index uint64) *CSIPlugin { + out := &CSIPlugin{ + ID: id, + CreateIndex: index, + ModifyIndex: index, + } + + out.newStructs() + return out +} + +func (p *CSIPlugin) newStructs() { + p.Jobs = map[string]map[string]*Job{} + p.Controllers = map[string]*CSIInfo{} + p.Nodes = map[string]*CSIInfo{} +} + +func (p *CSIPlugin) Copy(index uint64) *CSIPlugin { + copy := *p + out := © + out.newStructs() + out.ModifyIndex = index + + for ns, js := range p.Jobs { + out.Jobs[ns] = map[string]*Job{} + + for jid, j := range js { + out.Jobs[ns][jid] = j + } + } + + for k, v := range p.Controllers { + out.Controllers[k] = v + } + + for k, v := range p.Nodes { + out.Nodes[k] = v + } + + return out +} + +// AddJob adds a job entry to the plugin +func (p *CSIPlugin) AddJob(job *Job) { + if _, ok := p.Jobs[job.Namespace]; !ok { + p.Jobs[job.Namespace] = map[string]*Job{} + } + p.Jobs[job.Namespace][job.ID] = nil +} + +func (p *CSIPlugin) DeleteJob(job *Job) { + delete(p.Jobs[job.Namespace], job.ID) +} + +// AddPlugin adds a single plugin running on the node. Called from state.NodeUpdate in a +// transaction +func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo, index uint64) { + if info.ControllerInfo != nil { + prev, ok := p.Controllers[nodeID] + if ok && prev.Healthy { + p.ControllersHealthy -= 1 + } + p.Controllers[nodeID] = info + if info.Healthy { + p.ControllersHealthy += 1 + } + } + + if info.NodeInfo != nil { + prev, ok := p.Nodes[nodeID] + if ok && prev.Healthy { + p.NodesHealthy -= 1 + } + p.Nodes[nodeID] = info + if info.Healthy { + p.NodesHealthy += 1 + } + } + + p.ModifyIndex = index +} + +// DeleteNode removes all plugins from the node. 
Called from state.DeleteNode in a +// transaction +func (p *CSIPlugin) DeleteNode(nodeID string, index uint64) { + prev, ok := p.Controllers[nodeID] + if ok && prev.Healthy { + p.ControllersHealthy -= 1 + } + delete(p.Controllers, nodeID) + + prev, ok = p.Nodes[nodeID] + if ok && prev.Healthy { + p.NodesHealthy -= 1 + } + delete(p.Nodes, nodeID) + + p.ModifyIndex = index +} + +type CSIPluginListStub struct { + ID string + Type CSIPluginType + JobIDs map[string]map[string]struct{} + ControllersHealthy int + ControllersExpected int + NodesHealthy int + NodesExpected int + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *CSIPlugin) Stub() *CSIPluginListStub { + ids := map[string]map[string]struct{}{} + for ns, js := range p.Jobs { + ids[ns] = map[string]struct{}{} + for id := range js { + ids[ns][id] = struct{}{} + } + } + + return &CSIPluginListStub{ + ID: p.ID, + Type: p.Type, + JobIDs: ids, + ControllersHealthy: p.ControllersHealthy, + ControllersExpected: len(p.Controllers), + NodesHealthy: p.NodesHealthy, + NodesExpected: len(p.Nodes), + CreateIndex: p.CreateIndex, + ModifyIndex: p.ModifyIndex, + } +} + +func (p *CSIPlugin) IsEmpty() bool { + if !(len(p.Controllers) == 0 && len(p.Nodes) == 0) { + return false + } + + empty := true + for _, m := range p.Jobs { + if len(m) > 0 { + empty = false + } + } + return empty +} + +type CSIPluginListRequest struct { + QueryOptions +} + +type CSIPluginListResponse struct { + Plugins []*CSIPluginListStub + QueryMeta +} + +type CSIPluginGetRequest struct { + ID string + QueryOptions +} + +type CSIPluginGetResponse struct { + Plugin *CSIPlugin + QueryMeta +} diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 85fbaf58d..7119b7b72 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -7,7 +7,7 @@ import ( ) func TestCSIVolumeClaim(t *testing.T) { - vol := CreateCSIVolume("") + vol := NewCSIVolume("") vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter vol.Healthy = true diff --git a/nomad/structs/errors.go b/nomad/structs/errors.go index e6a63bfd7..2e3e1edd2 100644 --- a/nomad/structs/errors.go +++ b/nomad/structs/errors.go @@ -18,7 +18,6 @@ const ( errUnknownNomadVersion = "Unable to determine Nomad version" errNodeLacksRpc = "Node does not support RPC; requires 0.8 or later" errMissingAllocID = "Missing allocation ID" - errMissingCSIVolumeID = "Missing Volume ID" // Prefix based errors that are used to check if the error is of a given // type. These errors should be created with the associated constructor. @@ -42,7 +41,6 @@ var ( ErrUnknownNomadVersion = errors.New(errUnknownNomadVersion) ErrNodeLacksRpc = errors.New(errNodeLacksRpc) ErrMissingAllocID = errors.New(errMissingAllocID) - ErrMissingCSIVolumeID = errors.New(errMissingCSIVolumeID) ) // IsErrNoLeader returns whether the error is due to there being no leader. 
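The two HTTP routes registered in this patch return the CSIPluginListStub and the denormalized CSIPlugin structs directly. A minimal sketch of exercising them from outside the agent follows; the agent address is an assumption, and the decoded field set mirrors only a subset of CSIPluginListStub (the structs carry no json tags in this change, so Go's default field names are assumed on the wire).

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // pluginStub mirrors a subset of structs.CSIPluginListStub; the fields and
    // their JSON names are assumptions based on the struct added in this patch.
    type pluginStub struct {
        ID                  string
        Type                string
        ControllersHealthy  int
        ControllersExpected int
        NodesHealthy        int
        NodesExpected       int
    }

    func main() {
        // Assumes a local dev agent on the default HTTP port.
        base := "http://127.0.0.1:4646"

        resp, err := http.Get(base + "/v1/csi/plugins")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var stubs []pluginStub
        if err := json.NewDecoder(resp.Body).Decode(&stubs); err != nil {
            panic(err)
        }

        for _, s := range stubs {
            fmt.Printf("plugin %s (%s): %d/%d controllers, %d/%d nodes healthy\n",
                s.ID, s.Type, s.ControllersHealthy, s.ControllersExpected,
                s.NodesHealthy, s.NodesExpected)

            // The specific-plugin route returns the full record, including the
            // Jobs map populated by CSIPluginDenormalize.
            detail, err := http.Get(base + "/v1/csi/plugin/" + s.ID)
            if err != nil {
                panic(err)
            }
            detail.Body.Close()
        }
    }
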
From 33c55e609b891547f0a5a0bbf00a9b15f412e212 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 28 Jan 2020 11:27:32 -0500 Subject: [PATCH 047/126] csi: pluginmanager use PluginID instead of Driver --- client/pluginmanager/csimanager/interface.go | 4 ++-- client/pluginmanager/csimanager/manager.go | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index 4944bb2a3..c266e2842 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -9,7 +9,7 @@ import ( ) var ( - DriverNotFoundErr = errors.New("Driver not found") + PluginNotFoundErr = errors.New("Plugin not found") ) type MountInfo struct { @@ -25,7 +25,7 @@ type Manager interface { PluginManager() pluginmanager.PluginManager // MounterForVolume returns a VolumeMounter for the given requested volume. - // If there is no plugin registered for this volume type, a DriverNotFoundErr + // If there is no plugin registered for this volume type, a PluginNotFoundErr // will be returned. MounterForVolume(ctx context.Context, volume *structs.CSIVolume) (VolumeMounter, error) diff --git a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go index b32caed75..9f932e384 100644 --- a/client/pluginmanager/csimanager/manager.go +++ b/client/pluginmanager/csimanager/manager.go @@ -72,12 +72,12 @@ func (c *csiManager) PluginManager() pluginmanager.PluginManager { func (c *csiManager) MounterForVolume(ctx context.Context, vol *structs.CSIVolume) (VolumeMounter, error) { nodePlugins, hasAnyNodePlugins := c.instances["csi-node"] if !hasAnyNodePlugins { - return nil, DriverNotFoundErr + return nil, PluginNotFoundErr } - mgr, hasDriver := nodePlugins[vol.Driver] - if !hasDriver { - return nil, DriverNotFoundErr + mgr, hasPlugin := nodePlugins[vol.PluginID] + if !hasPlugin { + return nil, PluginNotFoundErr } return mgr.VolumeMounter(ctx) From 6ee038d5150b46295eeb0f609f0fd98711b6440c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 29 Jan 2020 12:35:35 +0100 Subject: [PATCH 048/126] helper/mount: Add mount helper package This package introduces some basic abstractions around mount utilties for various platforms. Initially it only supports linux, but the plan is to expand this as CSI expands across to other platforms. --- helper/mount/mount.go | 16 ++++++++++++++++ helper/mount/mount_linux.go | 31 +++++++++++++++++++++++++++++++ helper/mount/mount_unsupported.go | 25 +++++++++++++++++++++++++ 3 files changed, 72 insertions(+) create mode 100644 helper/mount/mount.go create mode 100644 helper/mount/mount_linux.go create mode 100644 helper/mount/mount_unsupported.go diff --git a/helper/mount/mount.go b/helper/mount/mount.go new file mode 100644 index 000000000..cd3e5f362 --- /dev/null +++ b/helper/mount/mount.go @@ -0,0 +1,16 @@ +package mount + +// Mounter defines the set of methods to allow for mount operations on a system. +type Mounter interface { + // IsNotAMountPoint detects if a provided directory is not a mountpoint. + IsNotAMountPoint(file string) (bool, error) + + // Mount will mount filesystem according to the specified configuration, on + // the condition that the target path is *not* already mounted. Options must + // be specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". 
+ Mount(device, target, mountType, options string) error +} + +// Compile-time check to ensure all Mounter implementations satisfy +// the mount interface. +var _ Mounter = &mounter{} diff --git a/helper/mount/mount_linux.go b/helper/mount/mount_linux.go new file mode 100644 index 000000000..4e4f2ab69 --- /dev/null +++ b/helper/mount/mount_linux.go @@ -0,0 +1,31 @@ +// +build linux + +package mount + +import ( + docker_mount "github.com/docker/docker/pkg/mount" +) + +// mounter provides the default implementation of mount.Mounter +// for the linux platform. +// Currently it delegates to the docker `mount` package. +type mounter struct { +} + +// New returns a Mounter for the current system. +func New() Mounter { + return &mounter{} +} + +// IsNotAMountPoint determines if a directory is not a mountpoint. +// It does this by checking the path against the contents of /proc/self/mountinfo +func (m *mounter) IsNotAMountPoint(path string) (bool, error) { + isMount, err := docker_mount.Mounted(path) + return !isMount, err +} + +func (m *mounter) Mount(device, target, mountType, options string) error { + // Defer to the docker implementation of `Mount`, it's correct enough for our + // usecase and avoids us needing to shell out to the `mount` utility. + return docker_mount.Mount(device, target, mountType, options) +} diff --git a/helper/mount/mount_unsupported.go b/helper/mount/mount_unsupported.go new file mode 100644 index 000000000..8605aded4 --- /dev/null +++ b/helper/mount/mount_unsupported.go @@ -0,0 +1,25 @@ +// +build !linux + +package mount + +import ( + "errors" +) + +// mounter provides the default implementation of mount.Mounter +// for unsupported platforms. +type mounter struct { +} + +// New returns a Mounter for the current system. +func New() Mounter { + return &mounter{} +} + +func (m *mounter) IsNotAMountPoint(path string) (bool, error) { + return false, errors.New("Unsupported platform") +} + +func (m *mounter) Mount(device, target, mountType, options string) error { + return errors.New("Unsupported platform") +} From f1ab38e8451cd3e1fc6a265549b2bab4a90c7093 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 28 Jan 2020 13:19:56 +0100 Subject: [PATCH 049/126] volume_manager: Introduce helpers for staging This commit adds helpers that create and validate the staging directory for a given volume. It is currently missing usage options as the interfaces are not yet in place for those. The staging directory is only required when a volume has the STAGE_UNSTAGE Volume capability and has to live within the plugin root as the plugin needs to be able to create mounts inside it from within the container. 
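As a self-contained sketch of the create-then-verify pattern these helpers implement, the snippet below uses the mount helper from the previous commit; the "staging" directory name and the usage-options path segment are placeholders standing in for StagingDirName and the not-yet-implemented usage options, not the eventual layout.

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "github.com/hashicorp/nomad/helper/mount"
    )

    // ensureDir creates a per-volume staging directory if needed and reports
    // whether something is already mounted there, mirroring the shape of
    // ensureStagingDir in the diff below. "staging" and "rw-file-system" are
    // placeholder path segments for illustration only.
    func ensureDir(pluginRoot, volumeID string) (alreadyMounted bool, path string, err error) {
        path = filepath.Join(pluginRoot, "staging", volumeID, "rw-file-system")

        if err := os.MkdirAll(path, 0700); err != nil && !os.IsExist(err) {
            return false, "", fmt.Errorf("failed to create staging directory for volume (%s): %v", volumeID, err)
        }

        m := mount.New()
        isNotMount, err := m.IsNotAMountPoint(path)
        if err != nil {
            return false, "", fmt.Errorf("mount point detection failed for volume (%s): %v", volumeID, err)
        }
        return !isNotMount, path, nil
    }

    func main() {
        mounted, path, err := ensureDir("/opt/nomad/client/csi/my-plugin", "volume-0")
        if err != nil {
            panic(err)
        }
        fmt.Println("staging path:", path, "pre-existing mount:", mounted)
    }

When the returned path is already a mountpoint, a caller can skip staging entirely, which is what the later NodeStageVolume support builds on.
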
--- client/pluginmanager/csimanager/volume.go | 31 +++++ .../pluginmanager/csimanager/volume_test.go | 108 ++++++++++++++++++ 2 files changed, 139 insertions(+) create mode 100644 client/pluginmanager/csimanager/volume_test.go diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 158efd7b5..966caaff3 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -3,9 +3,12 @@ package csimanager import ( "context" "fmt" + "os" + "path/filepath" "time" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/helper/mount" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" ) @@ -51,6 +54,34 @@ func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir string, } } +func (v *volumeManager) stagingDirForVolume(vol *structs.CSIVolume) string { + return filepath.Join(v.mountRoot, StagingDirName, vol.ID, "todo-provide-usage-options") +} + +// ensureStagingDir attempts to create a directory for use when staging a volume +// and then validates that the path is not already a mount point for e.g an +// existing volume stage. +// +// Returns whether the directory is a pre-existing mountpoint, the staging path, +// and any errors that occured. +func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, error) { + stagingPath := v.stagingDirForVolume(vol) + + // Make the staging path, owned by the Nomad User + if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) { + return false, "", fmt.Errorf("failed to create staging directory for volume (%s): %v", vol.ID, err) + } + + // Validate that it is not already a mount point + m := mount.New() + isNotMount, err := m.IsNotAMountPoint(stagingPath) + if err != nil { + return false, "", fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) + } + + return !isNotMount, stagingPath, nil +} + // MountVolume performs the steps required for using a given volume // configuration for the provided allocation. 
// diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go new file mode 100644 index 000000000..f69fc5d1c --- /dev/null +++ b/client/pluginmanager/csimanager/volume_test.go @@ -0,0 +1,108 @@ +package csimanager + +import ( + "io/ioutil" + "os" + "runtime" + "testing" + + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + csifake "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" +) + +func tmpDir(t testing.TB) string { + t.Helper() + dir, err := ioutil.TempDir("", "nomad") + require.NoError(t, err) + return dir +} + +func TestVolumeManager_ensureStagingDir(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + Volume *structs.CSIVolume + CreateDirAheadOfTime bool + MountDirAheadOfTime bool + + ExpectedErr error + ExpectedMountState bool + }{ + { + Name: "Creates a directory when one does not exist", + Volume: &structs.CSIVolume{ID: "foo"}, + }, + { + Name: "Does not fail because of a pre-existing directory", + Volume: &structs.CSIVolume{ID: "foo"}, + CreateDirAheadOfTime: true, + }, + { + Name: "Returns negative mount info", + Volume: &structs.CSIVolume{ID: "foo"}, + }, + { + Name: "Returns positive mount info", + Volume: &structs.CSIVolume{ID: "foo"}, + CreateDirAheadOfTime: true, + MountDirAheadOfTime: true, + ExpectedMountState: true, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + // Step 1: Validate that the test case makes sense + if !tc.CreateDirAheadOfTime && tc.MountDirAheadOfTime { + require.Fail(t, "Cannot Mount without creating a dir") + } + + if tc.MountDirAheadOfTime { + // We can enable these tests by either mounting a fake device on linux + // e.g shipping a small ext4 image file and using that as a loopback + // device, but there's no convenient way to implement this. + t.Skip("TODO: Skipped because we don't detect bind mounts") + } + + // Step 2: Test Setup + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + expectedStagingPath := manager.stagingDirForVolume(tc.Volume) + + if tc.CreateDirAheadOfTime { + err := os.MkdirAll(expectedStagingPath, 0700) + require.NoError(t, err) + } + + // Step 3: Now we can do some testing + + detectedMount, path, testErr := manager.ensureStagingDir(tc.Volume) + if tc.ExpectedErr != nil { + require.EqualError(t, testErr, tc.ExpectedErr.Error()) + return // We don't perform extra validation if an error was detected. 
+ } + + require.NoError(t, testErr) + require.Equal(t, tc.ExpectedMountState, detectedMount) + + // If the ensureStagingDir call had to create a directory itself, then here + // we validate that the directory exists and its permissions + if !tc.CreateDirAheadOfTime { + file, err := os.Lstat(path) + require.NoError(t, err) + require.True(t, file.IsDir()) + + // TODO: Figure out a windows equivalent of this test + if runtime.GOOS != "windows" { + require.Equal(t, os.FileMode(0700), file.Mode().Perm()) + } + } + }) + } +} From 65d9ddc9af8dcb09baa601eee7a86aff6aad97e4 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 29 Jan 2020 12:32:31 +0100 Subject: [PATCH 050/126] csi: Expose grpc.CallOptions for NodeStageVolume --- plugins/csi/client.go | 4 ++-- plugins/csi/fake/client.go | 3 ++- plugins/csi/plugin.go | 3 ++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 0766c8e9a..30d17ee8d 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -299,7 +299,7 @@ func (c *client) NodeGetInfo(ctx context.Context) (*NodeGetInfoResponse, error) return result, nil } -func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability) error { +func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability, opts ...grpc.CallOption) error { if c == nil { return fmt.Errorf("Client not initialized") } @@ -325,7 +325,7 @@ func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishCo // NodeStageVolume's response contains no extra data. If err == nil, we were // successful. - _, err := c.nodeClient.NodeStageVolume(ctx, req) + _, err := c.nodeClient.NodeStageVolume(ctx, req, opts...) return err } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index c8e0cfd55..06c4c6f08 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/shared/hclspec" + "google.golang.org/grpc" ) var _ csi.CSIPlugin = &Client{} @@ -161,7 +162,7 @@ func (c *Client) NodeGetInfo(ctx context.Context) (*csi.NodeGetInfoResponse, err // NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability // to prepare a volume for usage on a host. If err == nil, the response should // be assumed to be successful. -func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *csi.VolumeCapability) error { +func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *csi.VolumeCapability, opts ...grpc.CallOption) error { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 172aa01d7..d41a71b3c 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -7,6 +7,7 @@ import ( csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "github.com/hashicorp/nomad/plugins/base" + "google.golang.org/grpc" ) // CSIPlugin implements a lightweight abstraction layer around a CSI Plugin. @@ -46,7 +47,7 @@ type CSIPlugin interface { // NodeStageVolume is used when a plugin has the STAGE_UNSTAGE volume capability // to prepare a volume for usage on a host. 
If err == nil, the response should // be assumed to be successful. - NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability) error + NodeStageVolume(ctx context.Context, volumeID string, publishContext map[string]string, stagingTargetPath string, capabilities *VolumeCapability, opts ...grpc.CallOption) error // NodeUnstageVolume is used when a plugin has the STAGE_UNSTAGE volume capability // to undo the work performed by NodeStageVolume. If a volume has been staged, From 6e71baa77d658d07db09c42099a3baed35ee452f Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 29 Jan 2020 13:20:41 +0100 Subject: [PATCH 051/126] volume_manager: NodeStageVolume Support This commit introduces support for staging volumes when a plugin implements the STAGE_UNSTAGE_VOLUME capability. See the following for further reference material: https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume --- client/pluginmanager/csimanager/volume.go | 86 ++++++++++++++++++- .../pluginmanager/csimanager/volume_test.go | 71 +++++++++++++++ 2 files changed, 156 insertions(+), 1 deletion(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 966caaff3..f7f53d969 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -7,6 +7,7 @@ import ( "path/filepath" "time" + grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/helper/mount" "github.com/hashicorp/nomad/nomad/structs" @@ -63,7 +64,7 @@ func (v *volumeManager) stagingDirForVolume(vol *structs.CSIVolume) string { // existing volume stage. // // Returns whether the directory is a pre-existing mountpoint, the staging path, -// and any errors that occured. +// and any errors that occurred. func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, error) { stagingPath := v.stagingDirForVolume(vol) @@ -82,11 +83,94 @@ func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, return !isNotMount, stagingPath, nil } +// stageVolume prepares a volume for use by allocations. When a plugin exposes +// the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a +// given usage mode before the volume can be NodePublish-ed. +func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) error { + logger := hclog.FromContext(ctx) + logger.Trace("Preparing volume staging environment") + existingMount, stagingPath, err := v.ensureStagingDir(vol) + if err != nil { + return err + } + logger.Trace("Volume staging environment", "pre-existing_mount", existingMount, "staging_path", stagingPath) + + if existingMount { + logger.Debug("re-using existing staging mount for volume", "staging_path", stagingPath) + return nil + } + + var accessType csi.VolumeAccessType + switch vol.AttachmentMode { + case structs.CSIVolumeAttachmentModeBlockDevice: + accessType = csi.VolumeAccessTypeBlock + case structs.CSIVolumeAttachmentModeFilesystem: + accessType = csi.VolumeAccessTypeMount + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. 
+ return fmt.Errorf("Unknown volume attachment mode: %s", vol.AttachmentMode) + } + + var accessMode csi.VolumeAccessMode + switch vol.AccessMode { + case structs.CSIVolumeAccessModeSingleNodeReader: + accessMode = csi.VolumeAccessModeSingleNodeReaderOnly + case structs.CSIVolumeAccessModeSingleNodeWriter: + accessMode = csi.VolumeAccessModeSingleNodeWriter + case structs.CSIVolumeAccessModeMultiNodeMultiWriter: + accessMode = csi.VolumeAccessModeMultiNodeMultiWriter + case structs.CSIVolumeAccessModeMultiNodeSingleWriter: + accessMode = csi.VolumeAccessModeMultiNodeSingleWriter + case structs.CSIVolumeAccessModeMultiNodeReader: + accessMode = csi.VolumeAccessModeMultiNodeReaderOnly + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. + return fmt.Errorf("Unknown volume access mode: %v", vol.AccessMode) + } + + // We currently treat all explicit CSI NodeStageVolume errors (aside from timeouts, codes.ResourceExhausted, and codes.Unavailable) + // as fatal. + // In the future, we can provide more useful error messages based on + // different types of error. For error documentation see: + // https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume-errors + return v.plugin.NodeStageVolume(ctx, + vol.ID, + nil, /* TODO: Get publishContext from Server */ + stagingPath, + &csi.VolumeCapability{ + AccessType: accessType, + AccessMode: accessMode, + VolumeMountOptions: &csi.VolumeMountOptions{ + // GH-7007: Currently we have no way to provide these + }, + }, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) +} + // MountVolume performs the steps required for using a given volume // configuration for the provided allocation. // // TODO: Validate remote volume attachment and implement. 
func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { + logger := v.logger.With("volume_id", vol.ID) + ctx = hclog.WithContext(ctx, logger) + + if v.requiresStaging { + err := v.stageVolume(ctx, vol) + if err != nil { + return nil, err + } + } + return nil, fmt.Errorf("Unimplemented") } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index f69fc5d1c..cdd70920b 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -1,6 +1,8 @@ package csimanager import ( + "context" + "errors" "io/ioutil" "os" "runtime" @@ -106,3 +108,72 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { }) } } + +func TestVolumeManager_stageVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Volume *structs.CSIVolume + PluginErr error + ExpectedErr error + }{ + { + Name: "Returns an error when an invalid AttachmentMode is provided", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: "nonsense", + }, + ExpectedErr: errors.New("Unknown volume attachment mode: nonsense"), + }, + { + Name: "Returns an error when an invalid AccessMode is provided", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: "nonsense", + }, + ExpectedErr: errors.New("Unknown volume access mode: nonsense"), + }, + { + Name: "Returns an error when the plugin returns an error", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + }, + { + Name: "Happy Path", + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + PluginErr: nil, + ExpectedErr: nil, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodeStageVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + ctx := context.Background() + + err := manager.stageVolume(ctx, tc.Volume) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + }) + } +} From 8673ea5cba86c10da666d4df1d374281f2569c31 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 30 Jan 2020 08:15:56 -0500 Subject: [PATCH 052/126] csi: add empty CSI volume publication GC to scheduled core jobs (#7014) This changeset adds a new core job `CoreJobCSIVolumePublicationGC` to the leader's loop for scheduling core job evals. Right now this is an empty method body without even a config file stanza. Later changesets will implement the logic of volume publication GC. --- nomad/config.go | 5 +++++ nomad/core_sched.go | 9 +++++++++ nomad/core_sched_test.go | 31 +++++++++++++++++++++++++++++++ nomad/leader.go | 6 ++++++ nomad/structs/structs.go | 5 +++++ 5 files changed, 56 insertions(+) diff --git a/nomad/config.go b/nomad/config.go index 8a0fb7a18..2aced4830 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -191,6 +191,10 @@ type Config struct { // for GC. This gives users some time to view terminal deployments. 
DeploymentGCThreshold time.Duration + // CSIVolumePublicationGCInterval is how often we dispatch a job to GC + // unclaimed CSI volume publications. + CSIVolumePublicationGCInterval time.Duration + // EvalNackTimeout controls how long we allow a sub-scheduler to // work on an evaluation before we consider it failed and Nack it. // This allows that evaluation to be handed to another sub-scheduler @@ -377,6 +381,7 @@ func DefaultConfig() *Config { NodeGCThreshold: 24 * time.Hour, DeploymentGCInterval: 5 * time.Minute, DeploymentGCThreshold: 1 * time.Hour, + CSIVolumePublicationGCInterval: 60 * time.Second, EvalNackTimeout: 60 * time.Second, EvalDeliveryLimit: 3, EvalNackInitialReenqueueDelay: 1 * time.Second, diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 1fb7330ea..351bf25a7 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -50,6 +50,8 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error { return c.jobGC(eval) case structs.CoreJobDeploymentGC: return c.deploymentGC(eval) + case structs.CoreJobCSIVolumePublicationGC: + return c.csiVolumePublicationGC(eval) case structs.CoreJobForceGC: return c.forceGC(eval) default: @@ -703,3 +705,10 @@ func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime time.Time, return timeDiff > interval.Nanoseconds() } + +// csiVolumeGC is used to garbage collect CSI volume publications +func (c *CoreScheduler) csiVolumePublicationGC(eval *structs.Evaluation) error { + // TODO: implement me! + c.logger.Trace("garbage collecting unclaimed CSI volume publications") + return nil +} diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 70b500a82..92f12b911 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -1836,6 +1836,37 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { } } +// TODO: this is an empty test until CoreScheduler.csiVolumePublicationGC is implemented +func TestCoreScheduler_CSIVolumePublicationGC(t *testing.T) { + t.Parallel() + + s1, cleanupS1 := TestServer(t, nil) + defer cleanupS1() + testutil.WaitForLeader(t, s1.RPC) + assert := assert.New(t) + + // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 + s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) + + // TODO: insert volumes for nodes + state := s1.fsm.State() + + // Update the time tables to make this work + tt := s1.fsm.TimeTable() + tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.CSIVolumePublicationGCInterval)) + + // Create a core scheduler + snap, err := state.Snapshot() + assert.Nil(err, "Snapshot") + core := NewCoreScheduler(s1, snap) + + // Attempt the GC + gc := s1.coreJobEval(structs.CoreJobCSIVolumePublicationGC, 2000) + assert.Nil(core.Process(gc), "Process GC") + + // TODO: assert state is cleaned up +} + func TestCoreScheduler_PartitionEvalReap(t *testing.T) { t.Parallel() diff --git a/nomad/leader.go b/nomad/leader.go index b43d4abd2..98529fcc1 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -519,6 +519,8 @@ func (s *Server) schedulePeriodic(stopCh chan struct{}) { defer jobGC.Stop() deploymentGC := time.NewTicker(s.config.DeploymentGCInterval) defer deploymentGC.Stop() + csiVolumePublicationGC := time.NewTicker(s.config.CSIVolumePublicationGCInterval) + defer csiVolumePublicationGC.Stop() // getLatest grabs the latest index from the state store. It returns true if // the index was retrieved successfully. 
@@ -551,6 +553,10 @@ func (s *Server) schedulePeriodic(stopCh chan struct{}) { if index, ok := getLatest(); ok { s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobDeploymentGC, index)) } + case <-csiVolumePublicationGC.C: + if index, ok := getLatest(); ok { + s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobCSIVolumePublicationGC, index)) + } case <-stopCh: return } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 03c981794..5a62fc347 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -8694,6 +8694,11 @@ const ( // check if they are terminal. If so, we delete these out of the system. CoreJobDeploymentGC = "deployment-gc" + // CoreJobCSIVolumePublicationGC is use for the garbage collection of CSI + // volume publications. We periodically scan volumes to see if no allocs are + // claiming them. If so, we unpublish the volume. + CoreJobCSIVolumePublicationGC = "csi-volume-publication-gc" + // CoreJobForceGC is used to force garbage collection of all GCable objects. CoreJobForceGC = "force-gc" ) From add55e37b844c992e37287f9160e3fb701628062 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 31 Jan 2020 12:11:17 +0100 Subject: [PATCH 053/126] csi: Expose gRPC Options on NodeUnstageVolume --- plugins/csi/client.go | 4 ++-- plugins/csi/fake/client.go | 2 +- plugins/csi/plugin.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 30d17ee8d..9ac4013b5 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -329,7 +329,7 @@ func (c *client) NodeStageVolume(ctx context.Context, volumeID string, publishCo return err } -func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error { +func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { if c == nil { return fmt.Errorf("Client not initialized") } @@ -352,7 +352,7 @@ func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, staging // NodeUnstageVolume's response contains no extra data. If err == nil, we were // successful. - _, err := c.nodeClient.NodeUnstageVolume(ctx, req) + _, err := c.nodeClient.NodeUnstageVolume(ctx, req, opts...) return err } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 06c4c6f08..58fff1f6e 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -176,7 +176,7 @@ func (c *Client) NodeStageVolume(ctx context.Context, volumeID string, publishCo // this RPC must be called before freeing the volume. // // If err == nil, the response should be assumed to be successful. -func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error { +func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index d41a71b3c..8312c89ea 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -54,7 +54,7 @@ type CSIPlugin interface { // this RPC must be called before freeing the volume. // // If err == nil, the response should be assumed to be successful. 
- NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string) error + NodeUnstageVolume(ctx context.Context, volumeID string, stagingTargetPath string, opts ...grpc.CallOption) error // NodePublishVolume is used to prepare a volume for use by an allocation. // if err == nil the response should be assumed to be successful. From e619ae5a4268b865ef0330ba37268840f6a3cf2c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 31 Jan 2020 12:11:40 +0100 Subject: [PATCH 054/126] volume_manager: Initial support for unstaging volumes --- client/pluginmanager/csimanager/volume.go | 34 ++++++++++-- .../pluginmanager/csimanager/volume_test.go | 53 +++++++++++++++++++ 2 files changed, 84 insertions(+), 3 deletions(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index f7f53d969..2701e9159 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -161,7 +161,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) // // TODO: Validate remote volume attachment and implement. func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { - logger := v.logger.With("volume_id", vol.ID) + logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) if v.requiresStaging { @@ -174,6 +174,34 @@ func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, return nil, fmt.Errorf("Unimplemented") } -func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { - return fmt.Errorf("Unimplemented") +// unstageVolume is the inverse operation of `stageVolume` and must be called +// once for each staging path that a volume has been staged under. +// It is safe to call multiple times and a plugin is required to return OK if +// the volume has been unstaged or was never staged on the node. +func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolume) error { + logger := hclog.FromContext(ctx) + logger.Trace("Unstaging volume") + stagingPath := v.stagingDirForVolume(vol) + return v.plugin.NodeUnstageVolume(ctx, + vol.ID, + stagingPath, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) +} + +func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { + logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) + ctx = hclog.WithContext(ctx, logger) + + // TODO(GH-7030): NodeUnpublishVolume + + if !v.requiresStaging { + return nil + } + + // TODO(GH-7029): Implement volume usage tracking and only unstage volumes + // when the last alloc stops using it. 
+ return v.unstageVolume(ctx, vol) } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index cdd70920b..687bfbc25 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -177,3 +177,56 @@ func TestVolumeManager_stageVolume(t *testing.T) { }) } } + +func TestVolumeManager_unstageVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Volume *structs.CSIVolume + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + }{ + { + Name: "Returns an error when the plugin returns an error", + Volume: &structs.CSIVolume{ + ID: "foo", + }, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + ExpectedCSICallCount: 1, + }, + { + Name: "Happy Path", + Volume: &structs.CSIVolume{ + ID: "foo", + }, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodeUnstageVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + ctx := context.Background() + + err := manager.unstageVolume(ctx, tc.Volume) + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnstageVolumeCallCount) + }) + } +} From a0a67667407ebec6ec789b283e1d9bff2f870744 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Fri, 31 Jan 2020 10:13:21 -0500 Subject: [PATCH 055/126] CSI: Scheduler knows about CSI constraints and availability (#6995) * structs: piggyback csi volumes on host volumes for job specs * state_store: CSIVolumeByID always includes plugins, matches usecase * scheduler/feasible: csi volume checker * scheduler/stack: add csi volumes * contributing: update rpc checklist * scheduler: add volumes to State interface * scheduler/feasible: introduce new checker collection tgAvailable * scheduler/stack: taskGroupCSIVolumes checker is transient * state_store CSIVolumeDenormalizePlugins comment clarity * structs: remote TODO comment in TaskGroup Validate * scheduler/feasible: CSIVolumeChecker hasPlugins improve comment * scheduler/feasible_test: set t.Parallel * Update nomad/state/state_store.go Co-Authored-By: Danielle * Update scheduler/feasible.go Co-Authored-By: Danielle * structs: lift ControllerRequired to each volume * state_store: store plug.ControllerRequired, use it for volume health * feasible: csi match fast path remove stale host volume copied logic * scheduler/feasible: improve comments Co-authored-by: Danielle --- contributing/checklist-rpc-endpoint.md | 7 ++ nomad/state/state_store.go | 35 +++---- nomad/structs/csi.go | 2 + nomad/structs/structs.go | 4 +- scheduler/feasible.go | 112 ++++++++++++++++++++-- scheduler/feasible_test.go | 124 ++++++++++++++++++++++++- scheduler/scheduler.go | 3 + scheduler/stack.go | 14 ++- scheduler/stack_oss.go | 11 ++- scheduler/stack_test.go | 72 ++++++++++++++ 10 files changed, 344 insertions(+), 40 deletions(-) diff --git a/contributing/checklist-rpc-endpoint.md b/contributing/checklist-rpc-endpoint.md index 29ed912dc..8726cbfad 100644 --- a/contributing/checklist-rpc-endpoint.md +++ b/contributing/checklist-rpc-endpoint.md @@ -7,20 +7,27 @@ Prefer adding a new message to changing any existing RPC messages. 
* [ ] `Request` struct and `*RequestType` constant in `nomad/structs/structs.go`. Append the constant, old constant values must remain unchanged + * [ ] In `nomad/fsm.go`, add a dispatch case to the switch statement in `(n *nomadFSM) Apply` * `*nomadFSM` method to decode the request and call the state method + * [ ] State method for modifying objects in a `Txn` in `nomad/state/state_store.go` * `nomad/state/state_store_test.go` + * [ ] Handler for the request in `nomad/foo_endpoint.go` * RPCs are resolved by matching the method name for bound structs [net/rpc](https://golang.org/pkg/net/rpc/) * Check ACLs for security, list endpoints filter by ACL * Register new RPC struct in `nomad/server.go` + * Check ACLs to enforce security + * Wrapper for the HTTP request in `command/agent/foo_endpoint.go` * Backwards compatibility requires a new endpoint, an upgraded client or server may be forwarding this request to an old server, without support for the new RPC * RPCs triggered by an internal process may not need support + * Check ACLs as an optimization + * [ ] `nomad/core_sched.go` sends many RPCs * `ServersMeetMinimumVersion` asserts that the server cluster is upgraded, so use this to gaurd sending the new RPC, else send the old RPC diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index d8860de94..4d578bfc3 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -957,6 +957,7 @@ func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro plug = raw.(*structs.CSIPlugin).Copy(index) } else { plug = structs.NewCSIPlugin(info.PluginID, index) + plug.ControllerRequired = info.RequiresControllerPlugin } plug.AddPlugin(node.ID, info, index) @@ -1647,7 +1648,7 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum return nil } -// CSIVolumeByID is used to lookup a single volume +// CSIVolumeByID is used to lookup a single volume. 
Its plugins are denormalized to provide accurate Health func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVolume, error) { txn := s.db.Txn(false) @@ -1662,10 +1663,7 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVo } vol := obj.(*structs.CSIVolume) - // Health data is stale, so set this volume unhealthy until it's denormalized - vol.Healthy = false - - return vol, nil + return s.CSIVolumeDenormalizePlugins(ws, vol) } // CSIVolumes looks up csi_volumes by pluginID @@ -1839,21 +1837,10 @@ func (s *StateStore) deleteJobCSIPlugins(index uint64, job *structs.Job, txn *me return nil } -// CSIVolumeDenormalize takes a CSIVolume and denormalizes for the API -func (s *StateStore) CSIVolumeDenormalize(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { - if vol == nil { - return nil, nil - } - - vol, err := s.CSIVolumeDenormalizePlugins(ws, vol) - if err != nil { - return nil, err - } - - return s.csiVolumeDenormalizeAllocs(ws, vol) -} - -// CSIVolumeDenormalize returns a CSIVolume with current health and plugins +// CSIVolumeDenormalizePlugins returns a CSIVolume with current health and plugins, but +// without allocations +// Use this for current volume metadata, handling lists of volumes +// Use CSIVolumeDenormalize for volumes containing both health and current allocations func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { if vol == nil { return nil, nil @@ -1874,6 +1861,7 @@ func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs return vol, nil } + vol.ControllerRequired = plug.ControllerRequired vol.ControllersHealthy = plug.ControllersHealthy vol.NodesHealthy = plug.NodesHealthy // This number is incorrect! The expected number of node plugins is actually this + @@ -1881,13 +1869,16 @@ func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs vol.ControllersExpected = len(plug.Controllers) vol.NodesExpected = len(plug.Nodes) - vol.Healthy = vol.ControllersHealthy > 0 && vol.NodesHealthy > 0 + vol.Healthy = vol.NodesHealthy > 0 + if vol.ControllerRequired { + vol.Healthy = vol.ControllersHealthy > 0 && vol.Healthy + } return vol, nil } // csiVolumeDenormalizeAllocs returns a CSIVolume with allocations -func (s *StateStore) csiVolumeDenormalizeAllocs(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { +func (s *StateStore) CSIVolumeDenormalize(ws memdb.WatchSet, vol *structs.CSIVolume) (*structs.CSIVolume, error) { for id := range vol.ReadAllocs { a, err := s.AllocByID(ws, id) if err != nil { diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index bcb0a8845..f0dcc0251 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -153,6 +153,7 @@ type CSIVolume struct { Healthy bool VolumeGC time.Time PluginID string + ControllerRequired bool ControllersHealthy int ControllersExpected int NodesHealthy int @@ -457,6 +458,7 @@ type CSIPlugin struct { // controller tasks for this plugin. 
It is addressed by [job.Namespace][job.ID] Jobs map[string]map[string]*Job + ControllerRequired bool ControllersHealthy int Controllers map[string]*CSIInfo NodesHealthy int diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 5a62fc347..d993cc7c9 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -5179,8 +5179,8 @@ func (tg *TaskGroup) Validate(j *Job) error { // Validate the Host Volumes for name, decl := range tg.Volumes { - if decl.Type != VolumeTypeHost { - // TODO: Remove this error when adding new volume types + if !(decl.Type == VolumeTypeHost || + decl.Type == VolumeTypeCSI) { mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume %s has unrecognised type %s", name, decl.Type)) continue } diff --git a/scheduler/feasible.go b/scheduler/feasible.go index fbe184bb3..b92eec137 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -7,12 +7,20 @@ import ( "strconv" "strings" + memdb "github.com/hashicorp/go-memdb" version "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/helper/constraints/semver" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) +const ( + FilterConstraintHostVolumes = "missing compatible host volumes" + FilterConstraintCSIVolumes = "missing CSI plugins" + FilterConstraintDrivers = "missing drivers" + FilterConstraintDevices = "missing devices" +) + // FeasibleIterator is used to iteratively yield nodes that // match feasibility constraints. The iterators may manage // some state for performance optimizations. @@ -61,14 +69,14 @@ func (iter *StaticIterator) Next() *structs.Node { // Check if exhausted n := len(iter.nodes) if iter.offset == n || iter.seen == n { - if iter.seen != n { + if iter.seen != n { // seen has been Reset() to 0 iter.offset = 0 } else { return nil } } - // Return the next offset + // Return the next offset, use this one offset := iter.offset iter.offset += 1 iter.seen += 1 @@ -135,7 +143,7 @@ func (h *HostVolumeChecker) Feasible(candidate *structs.Node) bool { return true } - h.ctx.Metrics().FilterNode(candidate, "missing compatible host volumes") + h.ctx.Metrics().FilterNode(candidate, FilterConstraintHostVolumes) return false } @@ -177,6 +185,67 @@ func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool { return true } +type CSIVolumeChecker struct { + ctx Context + volumes map[string]*structs.VolumeRequest +} + +func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker { + return &CSIVolumeChecker{ + ctx: ctx, + } +} + +func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { + c.volumes = volumes +} + +func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool { + if c.hasPlugins(n) { + return true + } + + c.ctx.Metrics().FilterNode(n, FilterConstraintCSIVolumes) + return false +} + +func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) bool { + // We can mount the volume if + // - if required, a healthy controller plugin is running the driver + // - the volume has free claims + // - this node is running the node plugin, implies matching topology + + // Fast path: Requested no volumes. No need to check further. 
+ if len(c.volumes) == 0 { + return true + } + + ws := memdb.NewWatchSet() + for _, req := range c.volumes { + // Check that this node has a healthy running plugin with the right PluginID + plugin, ok := n.CSINodePlugins[req.Name] + if !(ok && plugin.Healthy) { + return false + } + + // Get the volume to check that it's healthy (there's a healthy controller + // and the volume hasn't encountered an error or been marked for GC + vol, err := c.ctx.State().CSIVolumeByID(ws, req.Source) + + if err != nil || vol == nil { + return false + } + + if (req.ReadOnly && !vol.CanReadOnly()) || + !vol.CanWrite() { + return false + } + + } + + return true +} + // DriverChecker is a FeasibilityChecker which returns whether a node has the // drivers necessary to scheduler a task group. type DriverChecker struct { @@ -201,7 +270,7 @@ func (c *DriverChecker) Feasible(option *structs.Node) bool { if c.hasDrivers(option) { return true } - c.ctx.Metrics().FilterNode(option, "missing drivers") + c.ctx.Metrics().FilterNode(option, FilterConstraintDrivers) return false } @@ -780,18 +849,20 @@ type FeasibilityWrapper struct { source FeasibleIterator jobCheckers []FeasibilityChecker tgCheckers []FeasibilityChecker + tgAvailable []FeasibilityChecker tg string } // NewFeasibilityWrapper returns a FeasibleIterator based on the passed source // and FeasibilityCheckers. func NewFeasibilityWrapper(ctx Context, source FeasibleIterator, - jobCheckers, tgCheckers []FeasibilityChecker) *FeasibilityWrapper { + jobCheckers, tgCheckers, tgAvailable []FeasibilityChecker) *FeasibilityWrapper { return &FeasibilityWrapper{ ctx: ctx, source: source, jobCheckers: jobCheckers, tgCheckers: tgCheckers, + tgAvailable: tgAvailable, } } @@ -858,7 +929,12 @@ OUTER: continue case EvalComputedClassEligible: // Fast path the eligible case - return option + if w.available(option) { + return option + } + // We match the class but are temporarily unavailable, the eval + // should be blocked + return nil case EvalComputedClassEscaped: tgEscaped = true case EvalComputedClassUnknown: @@ -884,10 +960,32 @@ OUTER: evalElig.SetTaskGroupEligibility(true, w.tg, option.ComputedClass) } + // tgAvailable handlers are available transiently, so we test them without + // affecting the computed class + if !w.available(option) { + continue OUTER + } + return option } } +// available checks transient feasibility checkers which depend on changing conditions, +// e.g. the health status of a plugin or driver +func (w *FeasibilityWrapper) available(option *structs.Node) bool { + // If we don't have any availability checks, we're available + if len(w.tgAvailable) == 0 { + return true + } + + for _, check := range w.tgAvailable { + if !check.Feasible(option) { + return false + } + } + return true +} + // DeviceChecker is a FeasibilityChecker which returns whether a node has the // devices necessary to scheduler a task group. 
type DeviceChecker struct { @@ -920,7 +1018,7 @@ func (c *DeviceChecker) Feasible(option *structs.Node) bool { return true } - c.ctx.Metrics().FilterNode(option, "missing devices") + c.ctx.Metrics().FilterNode(option, FilterConstraintDevices) return false } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index 62b39acf8..f738f2dc7 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -223,6 +223,120 @@ func TestHostVolumeChecker_ReadOnly(t *testing.T) { Result: true, }, } + for i, c := range cases { + checker.SetVolumes(c.RequestedVolumes) + if act := checker.Feasible(c.Node); act != c.Result { + t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result) + } + } +} + +func TestCSIVolumeChecker(t *testing.T) { + t.Parallel() + state, ctx := testContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + mock.Node(), + mock.Node(), + } + + // Register running plugins on some nodes + nodes[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + }, + } + nodes[0].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + nodes[1].CSINodePlugins = map[string]*structs.CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: false, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + nodes[2].CSINodePlugins = map[string]*structs.CSIInfo{ + "bar": { + PluginID: "bar", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + + // Create the plugins in the state store + index := uint64(999) + for _, node := range nodes { + err := state.UpsertNode(index, node) + require.NoError(t, err) + index++ + } + + // Create the volume in the state store + vid := "volume-id" + vol := structs.NewCSIVolume(vid) + vol.PluginID = "foo" + vol.Namespace = structs.DefaultNamespace + vol.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + vol.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + err := state.CSIVolumeRegister(index, []*structs.CSIVolume{vol}) + require.NoError(t, err) + + // Create volume requests + noVolumes := map[string]*structs.VolumeRequest{} + + volumes := map[string]*structs.VolumeRequest{ + "foo": { + Type: "csi", + Name: "foo", + Source: "volume-id", + }, + } + + checker := NewCSIVolumeChecker(ctx) + cases := []struct { + Node *structs.Node + RequestedVolumes map[string]*structs.VolumeRequest + Result bool + }{ + { // Get it + Node: nodes[0], + RequestedVolumes: volumes, + Result: true, + }, + { // Unhealthy + Node: nodes[1], + RequestedVolumes: volumes, + Result: false, + }, + { // Wrong id + Node: nodes[2], + RequestedVolumes: volumes, + Result: false, + }, + { // No Volumes requested or available + Node: nodes[3], + RequestedVolumes: noVolumes, + Result: true, + }, + { // No Volumes requested, some available + Node: nodes[0], + RequestedVolumes: noVolumes, + Result: true, + }, + { // Volumes requested, none available + Node: nodes[3], + RequestedVolumes: volumes, + Result: false, + }, + } for i, c := range cases { checker.SetVolumes(c.RequestedVolumes) @@ -1859,7 +1973,7 @@ func TestFeasibilityWrapper_JobIneligible(t *testing.T) { nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) mocked := newMockFeasibilityChecker(false) - wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil) + wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil, nil) // Set the job to ineligible 
ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass) @@ -1877,7 +1991,7 @@ func TestFeasibilityWrapper_JobEscapes(t *testing.T) { nodes := []*structs.Node{mock.Node()} static := NewStaticIterator(ctx, nodes) mocked := newMockFeasibilityChecker(false) - wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil) + wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil, nil) // Set the job to escaped cc := nodes[0].ComputedClass @@ -1903,7 +2017,7 @@ func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) { static := NewStaticIterator(ctx, nodes) jobMock := newMockFeasibilityChecker(true) tgMock := newMockFeasibilityChecker(false) - wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}) + wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil) // Set the job to escaped cc := nodes[0].ComputedClass @@ -1925,7 +2039,7 @@ func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) { static := NewStaticIterator(ctx, nodes) jobMock := newMockFeasibilityChecker(true) tgMock := newMockFeasibilityChecker(false) - wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}) + wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil) // Set the job to escaped cc := nodes[0].ComputedClass @@ -1947,7 +2061,7 @@ func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { static := NewStaticIterator(ctx, nodes) jobMock := newMockFeasibilityChecker(true) tgMock := newMockFeasibilityChecker(true) - wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}) + wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock}, nil) // Set the job to escaped cc := nodes[0].ComputedClass diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index 639b2b8cf..f7b81ce7d 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -91,6 +91,9 @@ type State interface { // SchedulerConfig returns config options for the scheduler SchedulerConfig() (uint64, *structs.SchedulerConfiguration, error) + + // CSIVolumeByID fetch CSI volumes, containing controller jobs + CSIVolumeByID(memdb.WatchSet, string) (*structs.CSIVolume, error) } // Planner interface is used to submit a task allocation plan. 
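For orientation, a minimal sketch of how the new CSI volume checker is exercised, using only names introduced in this changeset; `ctx` is assumed to be a scheduler Context and `node` a candidate `*structs.Node`, as in the tests:

    // A task group requests a CSI volume by the source ID it was registered under.
    volumes := map[string]*structs.VolumeRequest{
        "foo": {
            Name:   "foo",
            Type:   structs.VolumeTypeCSI,
            Source: "volume-id",
        },
    }

    // The stack wires this checker into the transient "available" set, so an
    // unhealthy plugin blocks the eval instead of poisoning the computed class.
    checker := NewCSIVolumeChecker(ctx)
    checker.SetVolumes(volumes)
    if !checker.Feasible(node) {
        // node is filtered out; metrics record FilterConstraintCSIVolumes
        // ("missing CSI plugins") for this candidate
    }
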
diff --git a/scheduler/stack.go b/scheduler/stack.go index 3e2b1b0b2..a02a876f3 100644 --- a/scheduler/stack.go +++ b/scheduler/stack.go @@ -51,6 +51,7 @@ type GenericStack struct { taskGroupConstraint *ConstraintChecker taskGroupDevices *DeviceChecker taskGroupHostVolumes *HostVolumeChecker + taskGroupCSIVolumes *CSIVolumeChecker distinctHostsConstraint *DistinctHostsIterator distinctPropertyConstraint *DistinctPropertyIterator @@ -131,6 +132,7 @@ func (s *GenericStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ra s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) s.taskGroupHostVolumes.SetVolumes(tg.Volumes) + s.taskGroupCSIVolumes.SetVolumes(tg.Volumes) s.distinctHostsConstraint.SetTaskGroup(tg) s.distinctPropertyConstraint.SetTaskGroup(tg) s.wrappedChecks.SetTaskGroup(tg.Name) @@ -174,6 +176,7 @@ type SystemStack struct { taskGroupConstraint *ConstraintChecker taskGroupDevices *DeviceChecker taskGroupHostVolumes *HostVolumeChecker + taskGroupCSIVolumes *CSIVolumeChecker distinctPropertyConstraint *DistinctPropertyIterator binPack *BinPackIterator @@ -205,6 +208,9 @@ func NewSystemStack(ctx Context) *SystemStack { // Filter on task group host volumes s.taskGroupHostVolumes = NewHostVolumeChecker(ctx) + // Filter on available, healthy CSI plugins + s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx) + // Filter on task group devices s.taskGroupDevices = NewDeviceChecker(ctx) @@ -213,8 +219,11 @@ func NewSystemStack(ctx Context) *SystemStack { // previously been marked as eligible or ineligible. Generally this will be // checks that only needs to examine the single node to determine feasibility. jobs := []FeasibilityChecker{s.jobConstraint} - tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, s.taskGroupHostVolumes, s.taskGroupDevices} - s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs) + tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, + s.taskGroupHostVolumes, + s.taskGroupDevices} + avail := []FeasibilityChecker{s.taskGroupCSIVolumes} + s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail) // Filter on distinct property constraints. s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.wrappedChecks) @@ -267,6 +276,7 @@ func (s *SystemStack) Select(tg *structs.TaskGroup, options *SelectOptions) *Ran s.taskGroupConstraint.SetConstraints(tgConstr.constraints) s.taskGroupDevices.SetTaskGroup(tg) s.taskGroupHostVolumes.SetVolumes(tg.Volumes) + s.taskGroupCSIVolumes.SetVolumes(tg.Volumes) s.wrappedChecks.SetTaskGroup(tg.Name) s.distinctPropertyConstraint.SetTaskGroup(tg) s.binPack.SetTaskGroup(tg) diff --git a/scheduler/stack_oss.go b/scheduler/stack_oss.go index 12caa98d9..50ff5a523 100644 --- a/scheduler/stack_oss.go +++ b/scheduler/stack_oss.go @@ -34,13 +34,20 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack { // Filter on task group host volumes s.taskGroupHostVolumes = NewHostVolumeChecker(ctx) + // Filter on available, healthy CSI plugins + s.taskGroupCSIVolumes = NewCSIVolumeChecker(ctx) + // Create the feasibility wrapper which wraps all feasibility checks in // which feasibility checking can be skipped if the computed node class has // previously been marked as eligible or ineligible. Generally this will be // checks that only needs to examine the single node to determine feasibility. 
jobs := []FeasibilityChecker{s.jobConstraint} - tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint, s.taskGroupHostVolumes, s.taskGroupDevices} - s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs) + tgs := []FeasibilityChecker{s.taskGroupDrivers, + s.taskGroupConstraint, + s.taskGroupHostVolumes, + s.taskGroupDevices} + avail := []FeasibilityChecker{s.taskGroupCSIVolumes} + s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs, avail) // Filter on distinct host constraints. s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks) diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 21ac18ee9..5b611036e 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -231,6 +231,78 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { } } +func TestServiceStack_Select_CSI(t *testing.T) { + state, ctx := testContext(t) + nodes := []*structs.Node{ + mock.Node(), + mock.Node(), + } + + // Create a volume in the state store + v := structs.NewCSIVolume("foo") + v.Namespace = structs.DefaultNamespace + v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter + v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem + v.PluginID = "bar" + err := state.CSIVolumeRegister(999, []*structs.CSIVolume{v}) + require.NoError(t, err) + + // Create a node with healthy fingerprints for both controller and node plugins + zero := nodes[0] + zero.CSIControllerPlugins = map[string]*structs.CSIInfo{"bar": { + PluginID: "bar", + Healthy: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsListVolumes: true, + }, + }} + zero.CSINodePlugins = map[string]*structs.CSIInfo{"bar": { + PluginID: "bar", + Healthy: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: zero.ID, + MaxVolumes: 2, + AccessibleTopology: nil, + RequiresNodeStageVolume: false, + }, + }} + + // Add the node to the state store to index the healthy plugins and mark the volume "foo" healthy + err = state.UpsertNode(1000, zero) + require.NoError(t, err) + + // Use the node to build the stack and test + if err := zero.ComputeClass(); err != nil { + t.Fatalf("ComputedClass() failed: %v", err) + } + + stack := NewGenericStack(false, ctx) + stack.SetNodes(nodes) + + job := mock.Job() + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{"foo": { + Name: "bar", + Type: structs.VolumeTypeCSI, + Source: "foo", + ReadOnly: true, + }} + + stack.SetJob(job) + + selectOptions := &SelectOptions{} + node := stack.Select(job.TaskGroups[0], selectOptions) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } + + if node.Node != zero { + t.Fatalf("bad") + } +} + func TestServiceStack_Select_ConstraintFilter(t *testing.T) { _, ctx := testContext(t) nodes := []*structs.Node{ From a5c96ce2e1a2c6e765efb119c8da25d98c63ca26 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 31 Jan 2020 14:45:05 +0100 Subject: [PATCH 056/126] csi: Add grpc.CallOption support to NodePublishVolume --- plugins/csi/client.go | 4 ++-- plugins/csi/fake/client.go | 2 +- plugins/csi/plugin.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 9ac4013b5..16e9f485e 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -356,7 +356,7 @@ func (c *client) NodeUnstageVolume(ctx context.Context, volumeID string, staging return err } -func (c *client) NodePublishVolume(ctx context.Context, req 
*NodePublishVolumeRequest) error { +func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error { if c == nil { return fmt.Errorf("Client not initialized") } @@ -370,7 +370,7 @@ func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRe // NodePublishVolume's response contains no extra data. If err == nil, we were // successful. - _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation()) + _, err := c.nodeClient.NodePublishVolume(ctx, req.ToCSIRepresentation(), opts...) return err } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 58fff1f6e..22eecc55e 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -185,7 +185,7 @@ func (c *Client) NodeUnstageVolume(ctx context.Context, volumeID string, staging return c.NextNodeUnstageVolumeErr } -func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) error { +func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest, opts ...grpc.CallOption) error { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 8312c89ea..c39990c05 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -58,7 +58,7 @@ type CSIPlugin interface { // NodePublishVolume is used to prepare a volume for use by an allocation. // if err == nil the response should be assumed to be successful. - NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest) error + NodePublishVolume(ctx context.Context, req *NodePublishVolumeRequest, opts ...grpc.CallOption) error // NodeUnpublishVolume is used to cleanup usage of a volume for an alloc. This // MUST be called before calling NodeUnstageVolume or ControllerUnpublishVolume From 80b7aa0a316bc0b11c0a00b073421814e6bec7bf Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 31 Jan 2020 14:45:48 +0100 Subject: [PATCH 057/126] volume_manager: Add support for publishing volumes --- client/pluginmanager/csimanager/interface.go | 2 + client/pluginmanager/csimanager/volume.go | 125 ++++++++++++++---- .../pluginmanager/csimanager/volume_test.go | 62 ++++++++- 3 files changed, 162 insertions(+), 27 deletions(-) diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index c266e2842..b969ff13e 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -13,6 +13,8 @@ var ( ) type MountInfo struct { + Source string + IsDevice bool } type VolumeMounter interface { diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 2701e9159..b7838a369 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -59,6 +59,10 @@ func (v *volumeManager) stagingDirForVolume(vol *structs.CSIVolume) string { return filepath.Join(v.mountRoot, StagingDirName, vol.ID, "todo-provide-usage-options") } +func (v *volumeManager) allocDirForVolume(vol *structs.CSIVolume, alloc *structs.Allocation) string { + return filepath.Join(v.mountRoot, AllocSpecificDirName, alloc.ID, vol.ID, "todo-provide-usage-options") +} + // ensureStagingDir attempts to create a directory for use when staging a volume // and then validates that the path is not already a mount point for e.g an // existing volume stage. 
@@ -83,23 +87,31 @@ func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, return !isNotMount, stagingPath, nil } -// stageVolume prepares a volume for use by allocations. When a plugin exposes -// the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a -// given usage mode before the volume can be NodePublish-ed. -func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) error { - logger := hclog.FromContext(ctx) - logger.Trace("Preparing volume staging environment") - existingMount, stagingPath, err := v.ensureStagingDir(vol) +// ensureAllocDir attempts to create a directory for use when publishing a volume +// and then validates that the path is not already a mount point (e.g when reattaching +// to existing allocs). +// +// Returns whether the directory is a pre-existing mountpoint, the publish path, +// and any errors that occurred. +func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation) (bool, string, error) { + allocPath := v.allocDirForVolume(vol, alloc) + + // Make the alloc path, owned by the Nomad User + if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) { + return false, "", fmt.Errorf("failed to create allocation directory for volume (%s): %v", vol.ID, err) + } + + // Validate that it is not already a mount point + m := mount.New() + isNotMount, err := m.IsNotAMountPoint(allocPath) if err != nil { - return err - } - logger.Trace("Volume staging environment", "pre-existing_mount", existingMount, "staging_path", stagingPath) - - if existingMount { - logger.Debug("re-using existing staging mount for volume", "staging_path", stagingPath) - return nil + return false, "", fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) } + return !isNotMount, allocPath, nil +} + +func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, error) { var accessType csi.VolumeAccessType switch vol.AttachmentMode { case structs.CSIVolumeAttachmentModeBlockDevice: @@ -111,7 +123,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) // final check during transformation into the requisite CSI Data type to // defend against development bugs and corrupted state - and incompatible // nomad versions in the future. - return fmt.Errorf("Unknown volume attachment mode: %s", vol.AttachmentMode) + return nil, fmt.Errorf("Unknown volume attachment mode: %s", vol.AttachmentMode) } var accessMode csi.VolumeAccessMode @@ -131,7 +143,38 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) // final check during transformation into the requisite CSI Data type to // defend against development bugs and corrupted state - and incompatible // nomad versions in the future. - return fmt.Errorf("Unknown volume access mode: %v", vol.AccessMode) + return nil, fmt.Errorf("Unknown volume access mode: %v", vol.AccessMode) + } + + return &csi.VolumeCapability{ + AccessType: accessType, + AccessMode: accessMode, + VolumeMountOptions: &csi.VolumeMountOptions{ + // GH-7007: Currently we have no way to provide these + }, + }, nil +} + +// stageVolume prepares a volume for use by allocations. When a plugin exposes +// the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a +// given usage mode before the volume can be NodePublish-ed. 
+func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) (string, error) { + logger := hclog.FromContext(ctx) + logger.Trace("Preparing volume staging environment") + existingMount, stagingPath, err := v.ensureStagingDir(vol) + if err != nil { + return "", err + } + logger.Trace("Volume staging environment", "pre-existing_mount", existingMount, "staging_path", stagingPath) + + if existingMount { + logger.Debug("re-using existing staging mount for volume", "staging_path", stagingPath) + return stagingPath, nil + } + + capability, err := capabilitiesFromVolume(vol) + if err != nil { + return "", err } // We currently treat all explicit CSI NodeStageVolume errors (aside from timeouts, codes.ResourceExhausted, and codes.Unavailable) @@ -139,23 +182,50 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) // In the future, we can provide more useful error messages based on // different types of error. For error documentation see: // https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume-errors - return v.plugin.NodeStageVolume(ctx, + return stagingPath, v.plugin.NodeStageVolume(ctx, vol.ID, nil, /* TODO: Get publishContext from Server */ stagingPath, - &csi.VolumeCapability{ - AccessType: accessType, - AccessMode: accessMode, - VolumeMountOptions: &csi.VolumeMountOptions{ - // GH-7007: Currently we have no way to provide these - }, - }, + capability, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), grpc_retry.WithMax(3), grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), ) } +func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, stagingPath string) (*MountInfo, error) { + logger := hclog.FromContext(ctx) + + preexistingMountForAlloc, targetPath, err := v.ensureAllocDir(vol, alloc) + if err != nil { + return nil, err + } + + if preexistingMountForAlloc { + logger.Debug("Re-using existing published volume for allocation") + return &MountInfo{Source: targetPath}, nil + } + + capabilities, err := capabilitiesFromVolume(vol) + if err != nil { + return nil, err + } + + err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ + VolumeID: vol.ID, + PublishContext: nil, // TODO: get publishcontext from server + StagingTargetPath: stagingPath, + TargetPath: targetPath, + VolumeCapability: capabilities, + }, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) + + return &MountInfo{Source: targetPath}, err +} + // MountVolume performs the steps required for using a given volume // configuration for the provided allocation. 
// @@ -164,14 +234,17 @@ func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) + var stagingPath string + var err error + if v.requiresStaging { - err := v.stageVolume(ctx, vol) + stagingPath, err = v.stageVolume(ctx, vol) if err != nil { return nil, err } } - return nil, fmt.Errorf("Unimplemented") + return v.publishVolume(ctx, vol, alloc, stagingPath) } // unstageVolume is the inverse operation of `stageVolume` and must be called diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 687bfbc25..0decc484e 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -167,7 +167,7 @@ func TestVolumeManager_stageVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) ctx := context.Background() - err := manager.stageVolume(ctx, tc.Volume) + _, err := manager.stageVolume(ctx, tc.Volume) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -230,3 +230,63 @@ func TestVolumeManager_unstageVolume(t *testing.T) { }) } } + +func TestVolumeManager_publishVolume(t *testing.T) { + t.Parallel() + cases := []struct { + Name string + Allocation *structs.Allocation + Volume *structs.CSIVolume + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + }{ + { + Name: "Returns an error when the plugin returns an error", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), + ExpectedCSICallCount: 1, + }, + { + Name: "Happy Path", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + }, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tmpPath := tmpDir(t) + defer os.RemoveAll(tmpPath) + + csiFake := &csifake.Client{} + csiFake.NextNodePublishVolumeErr = tc.PluginErr + + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + ctx := context.Background() + + _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, "") + + if tc.ExpectedErr != nil { + require.EqualError(t, err, tc.ExpectedErr.Error()) + } else { + require.NoError(t, err) + } + + require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount) + }) + } +} From bbf6a9c14b09c0005d100147e60f014b81e39b48 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Sun, 2 Feb 2020 10:13:51 +0100 Subject: [PATCH 058/126] volume_manager: cleanup of mount detection No functional changes, but makes ensure.*Dir follow a nicer return style. 
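Concretely, both helpers move from returning the mount flag first to returning the path first, so call sites read `path, isMount, err` (sketch of the signature change only; the full diff follows):

    // before
    func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, error)
    // after
    func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, error)
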
--- client/pluginmanager/csimanager/volume.go | 27 ++++++++++--------- .../pluginmanager/csimanager/volume_test.go | 2 +- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index b7838a369..f5a107ec1 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -69,22 +69,23 @@ func (v *volumeManager) allocDirForVolume(vol *structs.CSIVolume, alloc *structs // // Returns whether the directory is a pre-existing mountpoint, the staging path, // and any errors that occurred. -func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, error) { +func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, error) { stagingPath := v.stagingDirForVolume(vol) // Make the staging path, owned by the Nomad User if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) { - return false, "", fmt.Errorf("failed to create staging directory for volume (%s): %v", vol.ID, err) + return "", false, fmt.Errorf("failed to create staging directory for volume (%s): %v", vol.ID, err) + } // Validate that it is not already a mount point m := mount.New() isNotMount, err := m.IsNotAMountPoint(stagingPath) if err != nil { - return false, "", fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) + return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) } - return !isNotMount, stagingPath, nil + return stagingPath, !isNotMount, nil } // ensureAllocDir attempts to create a directory for use when publishing a volume @@ -93,22 +94,22 @@ func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (bool, string, // // Returns whether the directory is a pre-existing mountpoint, the publish path, // and any errors that occurred. 
-func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation) (bool, string, error) { +func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation) (string, bool, error) { allocPath := v.allocDirForVolume(vol, alloc) // Make the alloc path, owned by the Nomad User if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) { - return false, "", fmt.Errorf("failed to create allocation directory for volume (%s): %v", vol.ID, err) + return "", false, fmt.Errorf("failed to create allocation directory for volume (%s): %v", vol.ID, err) } // Validate that it is not already a mount point m := mount.New() isNotMount, err := m.IsNotAMountPoint(allocPath) if err != nil { - return false, "", fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) + return "", false, fmt.Errorf("mount point detection failed for volume (%s): %v", vol.ID, err) } - return !isNotMount, allocPath, nil + return allocPath, !isNotMount, nil } func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, error) { @@ -161,13 +162,13 @@ func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, erro func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) (string, error) { logger := hclog.FromContext(ctx) logger.Trace("Preparing volume staging environment") - existingMount, stagingPath, err := v.ensureStagingDir(vol) + stagingPath, isMount, err := v.ensureStagingDir(vol) if err != nil { return "", err } - logger.Trace("Volume staging environment", "pre-existing_mount", existingMount, "staging_path", stagingPath) + logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "staging_path", stagingPath) - if existingMount { + if isMount { logger.Debug("re-using existing staging mount for volume", "staging_path", stagingPath) return stagingPath, nil } @@ -196,12 +197,12 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, stagingPath string) (*MountInfo, error) { logger := hclog.FromContext(ctx) - preexistingMountForAlloc, targetPath, err := v.ensureAllocDir(vol, alloc) + targetPath, isMount, err := v.ensureAllocDir(vol, alloc) if err != nil { return nil, err } - if preexistingMountForAlloc { + if isMount { logger.Debug("Re-using existing published volume for allocation") return &MountInfo{Source: targetPath}, nil } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 0decc484e..10c828ca7 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -84,7 +84,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { // Step 3: Now we can do some testing - detectedMount, path, testErr := manager.ensureStagingDir(tc.Volume) + path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume) if tc.ExpectedErr != nil { require.EqualError(t, testErr, tc.ExpectedErr.Error()) return // We don't perform extra validation if an error was detected. 
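Taken together, the call sites in this change read as sketched below (both lines are lifted from the hunks above); treat this as a summary of the return-order swap rather than additional code, roughly in the style of Go's (value, ok) returns.

// before: mount flag first, then path
existingMount, stagingPath, err := v.ensureStagingDir(vol)

// after: path first, then mount flag
stagingPath, isMount, err := v.ensureStagingDir(vol)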
From 7b675f89ace56465c476802431278d2dd81554be Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 3 Feb 2020 11:59:00 -0500 Subject: [PATCH 059/126] csi: fix index maintenance for CSIVolume and CSIPlugin tables (#7049) * state_store: csi volumes/plugins store the index in the txn * nomad: csi_endpoint_test require index checks need uint64() * nomad: other tests using int 0 not uint64(0) * structs: pass index into New, but not other struct methods * state_store: csi plugin indexes, use new struct interface * nomad: csi_endpoint_test check index/query meta (on explicit 0) * structs: NewCSIVolume takes an index arg now * scheduler/test: NewCSIVolume takes an index arg now --- nomad/csi_endpoint_test.go | 18 +++++++------ nomad/node_endpoint_test.go | 4 +-- nomad/periodic_endpoint_test.go | 4 +-- nomad/state/state_store.go | 48 ++++++++++++++++++++++++++++----- nomad/state/state_store_test.go | 6 +++-- nomad/structs/csi.go | 20 ++++++-------- nomad/structs/csi_test.go | 2 +- scheduler/feasible_test.go | 2 +- scheduler/stack_test.go | 3 ++- 9 files changed, 71 insertions(+), 36 deletions(-) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index bb1cac005..c4d313e96 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -40,7 +40,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, PluginID: "minnie", }} - err := state.CSIVolumeRegister(0, vols) + err := state.CSIVolumeRegister(999, vols) require.NoError(t, err) // Create the register request @@ -56,7 +56,7 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { var resp structs.CSIVolumeGetResponse err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp) require.NoError(t, err) - require.NotEqual(t, 0, resp.Index) + require.Equal(t, uint64(999), resp.Index) require.Equal(t, vols[0].ID, resp.Volume.ID) } @@ -104,7 +104,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { resp1 := &structs.CSIVolumeRegisterResponse{} err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1) require.NoError(t, err) - require.NotEqual(t, 0, resp1.Index) + require.NotEqual(t, uint64(0), resp1.Index) // Get the volume back out policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) @@ -120,7 +120,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { resp2 := &structs.CSIVolumeGetResponse{} err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2) require.NoError(t, err) - require.NotEqual(t, 0, resp2.Index) + require.Equal(t, resp1.Index, resp2.Index) require.Equal(t, vols[0].ID, resp2.Volume.ID) // Registration does not update @@ -190,7 +190,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, PluginID: "paddy", }} - err := state.CSIVolumeRegister(0, vols) + err := state.CSIVolumeRegister(999, vols) require.NoError(t, err) var resp structs.CSIVolumeListResponse @@ -204,7 +204,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { } err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) require.NoError(t, err) - require.NotEqual(t, 0, resp.Index) + require.Equal(t, uint64(999), resp.Index) require.Equal(t, 2, len(resp.Volumes)) ids := map[string]bool{vols[0].ID: true, vols[1].ID: true} for _, v := range resp.Volumes { @@ -280,7 +280,7 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { resp1 := &structs.JobRegisterResponse{} err := msgpackrpc.CallWithCodec(codec, "Job.Register", req1, resp1) require.NoError(t, err) - 
require.NotEqual(t, 0, resp1.Index) + require.NotEqual(t, uint64(0), resp1.Index) // Get the plugin back out policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) @@ -296,7 +296,8 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { resp2 := &structs.CSIPluginGetResponse{} err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) require.NoError(t, err) - require.NotEqual(t, 0, resp2.Index) + // The job is created with a higher index than the plugin, there's an extra raft write + require.Greater(t, resp1.Index, resp2.Index) // List plugins req3 := &structs.CSIPluginListRequest{ @@ -323,6 +324,7 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { resp4 := &structs.JobDeregisterResponse{} err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req4, resp4) require.NoError(t, err) + require.Less(t, resp2.Index, resp4.Index) // Plugin is missing err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 9d7ebe5ec..3ff670887 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2141,7 +2141,7 @@ func TestClientEndpoint_UpdateAlloc(t *testing.T) { start := time.Now() err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2) require.Nil(err) - require.NotEqual(0, resp2.Index) + require.NotEqual(uint64(0), resp2.Index) if diff := time.Since(start); diff < batchUpdateInterval { t.Fatalf("too fast: %v", diff) @@ -3235,7 +3235,7 @@ func TestClientEndpoint_EmitEvents(t *testing.T) { var resp structs.GenericResponse err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp) require.Nil(err) - require.NotEqual(0, resp.Index) + require.NotEqual(uint64(0), resp.Index) // Check for the node in the FSM ws := memdb.NewWatchSet() diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index 0f77b645f..45b70b880 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -116,7 +116,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { req.AuthToken = token.SecretID var resp structs.PeriodicForceResponse assert.Nil(msgpackrpc.CallWithCodec(codec, "Periodic.Force", req, &resp)) - assert.NotEqual(0, resp.Index) + assert.NotEqual(uint64(0), resp.Index) // Lookup the evaluation ws := memdb.NewWatchSet() @@ -132,7 +132,7 @@ func TestPeriodicEndpoint_Force_ACL(t *testing.T) { req.AuthToken = root.SecretID var resp structs.PeriodicForceResponse assert.Nil(msgpackrpc.CallWithCodec(codec, "Periodic.Force", req, &resp)) - assert.NotEqual(0, resp.Index) + assert.NotEqual(uint64(0), resp.Index) // Lookup the evaluation ws := memdb.NewWatchSet() diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 4d578bfc3..96cc3ae6a 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -954,13 +954,14 @@ func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro var plug *structs.CSIPlugin if raw != nil { - plug = raw.(*structs.CSIPlugin).Copy(index) + plug = raw.(*structs.CSIPlugin).Copy() } else { plug = structs.NewCSIPlugin(info.PluginID, index) plug.ControllerRequired = info.RequiresControllerPlugin } - plug.AddPlugin(node.ID, info, index) + plug.AddPlugin(node.ID, info) + plug.ModifyIndex = index err = txn.Insert("csi_plugins", plug) if err != nil { @@ -984,6 +985,10 @@ func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro } } + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != 
nil { + return fmt.Errorf("index update failed: %v", err) + } + return nil } @@ -1010,8 +1015,9 @@ func deleteNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro return fmt.Errorf("csi_plugins missing plugin %s", id) } - plug := raw.(*structs.CSIPlugin).Copy(index) - plug.DeleteNode(node.ID, index) + plug := raw.(*structs.CSIPlugin).Copy() + plug.DeleteNode(node.ID) + plug.ModifyIndex = index if plug.IsEmpty() { err := txn.Delete("csi_plugins", plug) @@ -1026,6 +1032,10 @@ func deleteNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro } } + if err := txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + return nil } @@ -1644,6 +1654,10 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum } } + if err := txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + txn.Commit() return nil } @@ -1710,16 +1724,22 @@ func (s *StateStore) CSIVolumeClaim(index uint64, id string, alloc *structs.Allo return fmt.Errorf("volume row conversion error") } - volume := orig.Copy(index) + volume := orig.Copy() if !volume.Claim(claim, alloc) { return fmt.Errorf("volume max claim reached") } + volume.ModifyIndex = index + if err = txn.Insert("csi_volumes", volume); err != nil { return fmt.Errorf("volume update failed: %s: %v", id, err) } + if err = txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + txn.Commit() return nil } @@ -1744,6 +1764,10 @@ func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { } } + if err := txn.Insert("index", &IndexEntry{"csi_volumes", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + txn.Commit() return nil } @@ -1759,7 +1783,7 @@ func (s *StateStore) upsertJobCSIPlugins(index uint64, job *structs.Job, txn *me // Append this job to all of them for _, plug := range plugs { if plug.CreateIndex != index { - plug = plug.Copy(index) + plug = plug.Copy() } plug.AddJob(job) @@ -1770,6 +1794,10 @@ func (s *StateStore) upsertJobCSIPlugins(index uint64, job *structs.Job, txn *me } } + if err = txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + return nil } @@ -1819,10 +1847,12 @@ func (s *StateStore) deleteJobCSIPlugins(index uint64, job *structs.Job, txn *me // Remove this job from each plugin. 
If the plugin has no jobs left, remove it for _, plug := range plugs { if plug.CreateIndex != index { - plug = plug.Copy(index) + plug = plug.Copy() } plug.DeleteJob(job) + plug.ModifyIndex = index + if plug.IsEmpty() { err = txn.Delete("csi_plugins", plug) } else { @@ -1834,6 +1864,10 @@ func (s *StateStore) deleteJobCSIPlugins(index uint64, job *structs.Job, txn *me } } + if err = txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { + return fmt.Errorf("index update failed: %v", err) + } + return nil } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index df24da193..c965edf45 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2831,8 +2831,9 @@ func TestStateStore_CSIVolume(t *testing.T) { state := testStateStore(t) id0, id1 := uuid.Generate(), uuid.Generate() + index := uint64(1000) - v0 := structs.NewCSIVolume("foo") + v0 := structs.NewCSIVolume("foo", index) v0.ID = id0 v0.Namespace = "default" v0.PluginID = "minnie" @@ -2840,7 +2841,8 @@ func TestStateStore_CSIVolume(t *testing.T) { v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - v1 := structs.NewCSIVolume("foo") + index++ + v1 := structs.NewCSIVolume("foo", index) v1.ID = id1 v1.Namespace = "default" v1.PluginID = "adam" diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index f0dcc0251..8dca6c9f8 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -185,9 +185,11 @@ type CSIVolListStub struct { } // NewCSIVolume creates the volume struct. No side-effects -func NewCSIVolume(pluginID string) *CSIVolume { +func NewCSIVolume(pluginID string, index uint64) *CSIVolume { out := &CSIVolume{ - ID: pluginID, + ID: pluginID, + CreateIndex: index, + ModifyIndex: index, } out.newStructs() @@ -250,11 +252,10 @@ func (v *CSIVolume) CanWrite() bool { } // Copy returns a copy of the volume, which shares only the Topologies slice -func (v *CSIVolume) Copy(index uint64) *CSIVolume { +func (v *CSIVolume) Copy() *CSIVolume { copy := *v out := © out.newStructs() - out.ModifyIndex = index for k, v := range v.ReadAllocs { out.ReadAllocs[k] = v @@ -486,11 +487,10 @@ func (p *CSIPlugin) newStructs() { p.Nodes = map[string]*CSIInfo{} } -func (p *CSIPlugin) Copy(index uint64) *CSIPlugin { +func (p *CSIPlugin) Copy() *CSIPlugin { copy := *p out := © out.newStructs() - out.ModifyIndex = index for ns, js := range p.Jobs { out.Jobs[ns] = map[string]*Job{} @@ -525,7 +525,7 @@ func (p *CSIPlugin) DeleteJob(job *Job) { // AddPlugin adds a single plugin running on the node. Called from state.NodeUpdate in a // transaction -func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo, index uint64) { +func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo) { if info.ControllerInfo != nil { prev, ok := p.Controllers[nodeID] if ok && prev.Healthy { @@ -547,13 +547,11 @@ func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo, index uint64) { p.NodesHealthy += 1 } } - - p.ModifyIndex = index } // DeleteNode removes all plugins from the node. 
Called from state.DeleteNode in a // transaction -func (p *CSIPlugin) DeleteNode(nodeID string, index uint64) { +func (p *CSIPlugin) DeleteNode(nodeID string) { prev, ok := p.Controllers[nodeID] if ok && prev.Healthy { p.ControllersHealthy -= 1 @@ -565,8 +563,6 @@ func (p *CSIPlugin) DeleteNode(nodeID string, index uint64) { p.NodesHealthy -= 1 } delete(p.Nodes, nodeID) - - p.ModifyIndex = index } type CSIPluginListStub struct { diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 7119b7b72..7685b41b4 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -7,7 +7,7 @@ import ( ) func TestCSIVolumeClaim(t *testing.T) { - vol := NewCSIVolume("") + vol := NewCSIVolume("", 0) vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter vol.Healthy = true diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index f738f2dc7..f5c176526 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -281,7 +281,7 @@ func TestCSIVolumeChecker(t *testing.T) { // Create the volume in the state store vid := "volume-id" - vol := structs.NewCSIVolume(vid) + vol := structs.NewCSIVolume(vid, index) vol.PluginID = "foo" vol.Namespace = structs.DefaultNamespace vol.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go index 5b611036e..db12c5820 100644 --- a/scheduler/stack_test.go +++ b/scheduler/stack_test.go @@ -239,7 +239,8 @@ func TestServiceStack_Select_CSI(t *testing.T) { } // Create a volume in the state store - v := structs.NewCSIVolume("foo") + index := uint64(999) + v := structs.NewCSIVolume("foo", index) v.Namespace = structs.DefaultNamespace v.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem From 421d7ed2e412923cae1321f357b9b2700c753327 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 3 Feb 2020 15:40:28 -0500 Subject: [PATCH 060/126] nomad: csi_endpoint send register & deregister requests to raft (#7059) --- nomad/csi_endpoint.go | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index c1d83266d..87d6a9a8c 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -210,26 +210,30 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return structs.ErrPermissionDenied } + ws := memdb.NewWatchSet() + // This is the only namespace we ACL checked, force all the volumes to use it - for _, v := range args.Volumes { - v.Namespace = args.RequestNamespace() - if err = v.Validate(); err != nil { + for _, vol := range args.Volumes { + vol.Namespace = args.RequestNamespace() + if err = vol.Validate(); err != nil { return err } + + exists, _ := v.srv.State().CSIVolumeByID(ws, vol.ID) + if exists != nil { + return fmt.Errorf("volume %s already exists", vol.ID) + } } - state := v.srv.State() - index, err := state.LatestIndex() + _, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) if err != nil { + v.logger.Error("csi raft apply failed", "error", err, "method", "register") return err } - err = state.CSIVolumeRegister(index, args.Volumes) - if err != nil { - return err - } - - return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) + reply.Index = index + v.srv.setQueryMeta(&reply.QueryMeta) + return nil } // Deregister removes a set of volumes @@ -251,18 +255,15 @@ func (v *CSIVolume) Deregister(args 
*structs.CSIVolumeDeregisterRequest, reply * return structs.ErrPermissionDenied } - state := v.srv.State() - index, err := state.LatestIndex() + _, index, err := v.srv.raftApply(structs.CSIVolumeDeregisterRequestType, args) if err != nil { + v.logger.Error("csi raft apply failed", "error", err, "method", "deregister") return err } - err = state.CSIVolumeDeregister(index, args.VolumeIDs) - if err != nil { - return err - } - - return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) + reply.Index = index + v.srv.setQueryMeta(&reply.QueryMeta) + return nil } // CSIPlugin wraps the structs.CSIPlugin with request data and server context From d4cd272de38a91414461c193cdcac4be4eaf5619 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 4 Feb 2020 08:00:00 -0500 Subject: [PATCH 061/126] csi: implement VolumeClaimRPC (#7048) When the client receives an allocation which includes a CSI volume, the alloc runner will block its main `Run` loop. The alloc runner will issue a `VolumeClaim` RPC to the Nomad servers. This changeset implements the portions of the `VolumeClaim` RPC endpoint that have not been previously completed. --- nomad/csi_endpoint.go | 49 +++++++++++--- nomad/csi_endpoint_test.go | 135 +++++++++++++++++++++++++++++++++++++ nomad/structs/csi.go | 4 ++ 3 files changed, 179 insertions(+), 9 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 87d6a9a8c..d96326f50 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -210,26 +210,22 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return structs.ErrPermissionDenied } - ws := memdb.NewWatchSet() - // This is the only namespace we ACL checked, force all the volumes to use it for _, vol := range args.Volumes { vol.Namespace = args.RequestNamespace() if err = vol.Validate(); err != nil { return err } - - exists, _ := v.srv.State().CSIVolumeByID(ws, vol.ID) - if exists != nil { - return fmt.Errorf("volume %s already exists", vol.ID) - } } - _, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) + resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) if err != nil { v.logger.Error("csi raft apply failed", "error", err, "method", "register") return err } + if respErr, ok := resp.(error); ok { + return respErr + } reply.Index = index v.srv.setQueryMeta(&reply.QueryMeta) @@ -255,11 +251,46 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return structs.ErrPermissionDenied } - _, index, err := v.srv.raftApply(structs.CSIVolumeDeregisterRequestType, args) + resp, index, err := v.srv.raftApply(structs.CSIVolumeDeregisterRequestType, args) if err != nil { v.logger.Error("csi raft apply failed", "error", err, "method", "deregister") return err } + if respErr, ok := resp.(error); ok { + return respErr + } + + reply.Index = index + v.srv.setQueryMeta(&reply.QueryMeta) + return nil +} + +// Claim claims a volume +func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CSIVolumeClaimResponse) error { + if done, err := v.srv.forward("CSIVolume.Claim", args, args, reply); done { + return err + } + + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + if err != nil { + return err + } + + metricsStart := time.Now() + defer metrics.MeasureSince([]string{"nomad", "volume", "claim"}, metricsStart) + + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + return structs.ErrPermissionDenied + } + + resp, index, err := 
v.srv.raftApply(structs.CSIVolumeClaimRequestType, args) + if err != nil { + v.logger.Error("csi raft apply failed", "error", err, "method", "claim") + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } reply.Index = index v.srv.setQueryMeta(&reply.QueryMeta) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index c4d313e96..7c88a91d5 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -1,6 +1,7 @@ package nomad import ( + "fmt" "testing" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" @@ -147,6 +148,140 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { require.Nil(t, resp2.Volume) } +// TestCSIVolumeEndpoint_Claim exercises the VolumeClaim RPC, verifying that claims +// are honored only if the volume exists, the mode is permitted, and the volume +// is schedulable according to its count of claims. +func TestCSIVolumeEndpoint_Claim(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + policy := mock.NamespacePolicy(ns, "", + []string{acl.NamespaceCapabilityCSICreateVolume, acl.NamespaceCapabilityCSIAccess}) + accessToken := mock.CreatePolicyAndToken(t, state, 1001, + acl.NamespaceCapabilityCSIAccess, policy) + codec := rpcClient(t, srv) + id0 := uuid.Generate() + + // Create an initial volume claim request; we expect it to fail + // because there's no such volume yet. + claimReq := &structs.CSIVolumeClaimRequest{ + VolumeID: id0, + Allocation: mock.BatchAlloc(), + Claim: structs.CSIVolumeClaimWrite, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: accessToken.SecretID, + }, + } + claimResp := &structs.CSIVolumeClaimResponse{} + err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id0), + "expected 'volume not found' error because volume hasn't yet been created") + + // Create a client nodes with a plugin + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + plugin := structs.NewCSIPlugin("minnie", 1) + plugin.ControllerRequired = false + plugin.AddPlugin(node.ID, &structs.CSIInfo{}) + err = state.UpsertNode(3, node) + require.NoError(t, err) + + // Create the volume for the plugin + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: "notTheNamespace", + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + + createToken := mock.CreatePolicyAndToken(t, state, 1001, + acl.NamespaceCapabilityCSICreateVolume, policy) + // Register the volume + volReq := &structs.CSIVolumeRegisterRequest{ + Volumes: vols, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: createToken.SecretID, + }, + } + volResp := &structs.CSIVolumeRegisterResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", volReq, volResp) + require.NoError(t, err) + + // Verify we can get the volume back out + getToken := mock.CreatePolicyAndToken(t, state, 1001, + 
acl.NamespaceCapabilityCSIAccess, policy) + volGetReq := &structs.CSIVolumeGetRequest{ + ID: id0, + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + volGetResp := &structs.CSIVolumeGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + + // Now our claim should succeed + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + + // Verify the claim was set + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 0) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) + + // Make another writer claim for a different alloc + claimReq.Allocation = mock.BatchAlloc() + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.EqualError(t, err, "volume max claim reached", + "expected 'volume max claim reached' because we only allow 1 writer") + + // Fix the mode and our claim will succeed + claimReq.Claim = structs.CSIVolumeClaimRead + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + + // Verify the new claim was set + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 1) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) + + // Claim is idempotent + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.NoError(t, err) + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) + require.NoError(t, err) + require.Equal(t, id0, volGetResp.Volume.ID) + require.Len(t, volGetResp.Volume.ReadAllocs, 1) + require.Len(t, volGetResp.Volume.WriteAllocs, 1) +} + func TestCSIVolumeEndpoint_List(t *testing.T) { t.Parallel() srv, shutdown := TestServer(t, func(c *Config) { diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 8dca6c9f8..6f8dbc6d9 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -430,6 +430,10 @@ type CSIVolumeClaimRequest struct { WriteRequest } +type CSIVolumeClaimResponse struct { + QueryMeta +} + type CSIVolumeListRequest struct { PluginID string QueryOptions From fb1aad66ee766c9bec9cebfe3aa5b7fe462c43fd Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 5 Feb 2020 13:27:37 -0500 Subject: [PATCH 062/126] csi: implement releasing volume claims for terminal allocs (#7076) When an alloc is marked terminal, and after node unstage/unpublish have been called, the client will sync the terminal alloc state with the server via `Node.UpdateAlloc` RPC. This changeset implements releasing the volume claim for each volume associated with the terminal alloc. It doesn't yet implement the RPC call we need to make to the `ControllerUnpublishVolume` CSI RPC. 
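As a minimal sketch of the claim lifecycle this closes out, the sequence below uses the state-store helper exercised by the tests in this change. It is illustrative only: the indexes are arbitrary here (in the server they come from raft), and in practice the release is driven by Node.UpdateAlloc rather than called directly.

// Illustrative only: a write claim is taken when the alloc starts using the
// volume, and released through the same helper once the alloc is terminal
// and the node has unpublished/unstaged the volume.
func claimLifecycleSketch(store *state.StateStore, volID string, alloc *structs.Allocation) error {
	if err := store.CSIVolumeClaim(100, volID, alloc, structs.CSIVolumeClaimWrite); err != nil {
		return err
	}
	// Node.UpdateAlloc applies this release for terminal allocs so the
	// volume's claim slot frees up for rescheduling.
	return store.CSIVolumeClaim(101, volID, alloc, structs.CSIVolumeClaimRelease)
}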
--- nomad/node_endpoint.go | 101 ++++++++++++++++++++++++---------- nomad/node_endpoint_test.go | 106 ++++++++++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+), 30 deletions(-) diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index e26891349..6639c0c0d 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -1081,39 +1081,54 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene now := time.Now() var evals []*structs.Evaluation - for _, alloc := range args.Alloc { - alloc.ModifyTime = now.UTC().UnixNano() + for _, allocToUpdate := range args.Alloc { + allocToUpdate.ModifyTime = now.UTC().UnixNano() + + if !allocToUpdate.TerminalStatus() { + continue + } + + alloc, _ := n.srv.State().AllocByID(nil, allocToUpdate.ID) + if alloc == nil { + continue + } + + job, err := n.srv.State().JobByID(nil, alloc.Namespace, alloc.JobID) + if err != nil { + n.logger.Error("UpdateAlloc unable to find job", "job", alloc.JobID, "error", err) + continue + } + if job == nil { + n.logger.Debug("UpdateAlloc unable to find job", "job", alloc.JobID) + continue + } + + taskGroup := job.LookupTaskGroup(alloc.TaskGroup) + if taskGroup == nil { + continue + } + + err = n.unclaimVolumesForTerminalAllocs(args, alloc, taskGroup) + if err != nil { + n.logger.Error("UpdateAlloc unable to release CSI volume", + "alloc", alloc.ID, "error", err) + continue + } // Add an evaluation if this is a failed alloc that is eligible for rescheduling - if alloc.ClientStatus == structs.AllocClientStatusFailed { - // Only create evaluations if this is an existing alloc, - // and eligible as per its task group's ReschedulePolicy - if existingAlloc, _ := n.srv.State().AllocByID(nil, alloc.ID); existingAlloc != nil { - job, err := n.srv.State().JobByID(nil, existingAlloc.Namespace, existingAlloc.JobID) - if err != nil { - n.logger.Error("UpdateAlloc unable to find job", "job", existingAlloc.JobID, "error", err) - continue - } - if job == nil { - n.logger.Debug("UpdateAlloc unable to find job", "job", existingAlloc.JobID) - continue - } - taskGroup := job.LookupTaskGroup(existingAlloc.TaskGroup) - if taskGroup != nil && existingAlloc.FollowupEvalID == "" && existingAlloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) { - eval := &structs.Evaluation{ - ID: uuid.Generate(), - Namespace: existingAlloc.Namespace, - TriggeredBy: structs.EvalTriggerRetryFailedAlloc, - JobID: existingAlloc.JobID, - Type: job.Type, - Priority: job.Priority, - Status: structs.EvalStatusPending, - CreateTime: now.UTC().UnixNano(), - ModifyTime: now.UTC().UnixNano(), - } - evals = append(evals, eval) - } + if allocToUpdate.ClientStatus == structs.AllocClientStatusFailed && alloc.FollowupEvalID == "" && alloc.RescheduleEligible(taskGroup.ReschedulePolicy, now) { + eval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: alloc.Namespace, + TriggeredBy: structs.EvalTriggerRetryFailedAlloc, + JobID: alloc.JobID, + Type: job.Type, + Priority: job.Priority, + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), } + evals = append(evals, eval) } } @@ -1155,6 +1170,32 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene return nil } +// unclaimVolumesForTerminalAllocs unpublishes and unclaims CSI volumes +// that belong to the alloc if it is terminal. 
+func (n *Node) unclaimVolumesForTerminalAllocs(args *structs.AllocUpdateRequest, alloc *structs.Allocation, taskGroup *structs.TaskGroup) error { + for _, volume := range taskGroup.Volumes { + + // TODO(tgross): we also need to call ControllerUnpublishVolume CSI RPC here + // but the server-side CSI client + routing hasn't been implemented yet + + req := &structs.CSIVolumeClaimRequest{ + VolumeID: volume.Source, + Allocation: alloc, + Claim: structs.CSIVolumeClaimRelease, + WriteRequest: args.WriteRequest, + } + + resp, _, err := n.srv.raftApply(structs.CSIVolumeClaimRequestType, req) + if err != nil { + return err + } + if respErr, ok := resp.(error); ok { + return respErr + } + } + return nil +} + // batchUpdate is used to update all the allocations func (n *Node) batchUpdate(future *structs.BatchFuture, updates []*structs.Allocation, evals []*structs.Evaluation) { // Group pending evals by jobID to prevent creating unnecessary evals diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 3ff670887..7f5f04882 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2312,6 +2312,112 @@ func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) { } } +func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + codec := rpcClient(t, srv) + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + // Create a client node with a plugin + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": {PluginID: "csi-plugin-example", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + plugin := structs.NewCSIPlugin("csi-plugin-example", 1) + plugin.ControllerRequired = false + plugin.AddPlugin(node.ID, &structs.CSIInfo{}) + err := state.UpsertNode(99, node) + require.NoError(t, err) + + // Create the volume for the plugin + volId0 := uuid.Generate() + vols := []*structs.CSIVolume{{ + ID: volId0, + Namespace: "notTheNamespace", + PluginID: "csi-plugin-example", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + err = state.CSIVolumeRegister(4, vols) + require.NoError(t, err) + + // Create a job with 2 allocations + job := mock.Job() + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + "_": { + Name: "someVolume", + Type: "", + Source: volId0, + ReadOnly: false, + }, + } + err = state.UpsertJob(101, job) + require.NoError(t, err) + + alloc1 := mock.Alloc() + alloc1.JobID = job.ID + alloc1.NodeID = node.ID + err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID)) + require.NoError(t, err) + alloc1.TaskGroup = job.TaskGroups[0].Name + + alloc2 := mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Verify no claims are set + vol, err := state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) 
+ err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update the 1st alloc as terminal/failed + alloc1.ClientStatus = structs.AllocClientStatusFailed + err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", + &structs.AllocUpdateRequest{ + Alloc: []*structs.Allocation{alloc1}, + WriteRequest: structs.WriteRequest{Region: "global"}, + }, &structs.NodeAllocsResponse{}) + require.NoError(t, err) + + // Lookup the alloc and verify status was updated + out, err := state.AllocByID(ws, alloc1.ID) + require.NoError(t, err) + require.Equal(t, structs.AllocClientStatusFailed, out.ClientStatus) + + // Verify the claim was released + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 0) +} + func TestClientEndpoint_CreateNodeEvals(t *testing.T) { t.Parallel() From ee85c468c014b943b493a76976a4197a618dbcb8 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 5 Feb 2020 12:13:08 +0100 Subject: [PATCH 063/126] csimanager: Instantiate fingerprint manager's csiclient --- client/pluginmanager/csimanager/instance.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 7ebf683f9..e6d96d663 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -71,6 +71,7 @@ func (i *instanceManager) run() { return } i.client = c + i.fp.client = c go i.runLoop() } From 94e87fbe9c0372147daa8f81097ec8536a97fb56 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 6 Feb 2020 15:46:47 +0100 Subject: [PATCH 064/126] csimanager: Cleanup volumemanager setup --- .../pluginmanager/csimanager/fingerprint.go | 17 +++++- client/pluginmanager/csimanager/instance.go | 61 ++++++++----------- 2 files changed, 40 insertions(+), 38 deletions(-) diff --git a/client/pluginmanager/csimanager/fingerprint.go b/client/pluginmanager/csimanager/fingerprint.go index d86e68299..b9596b9ce 100644 --- a/client/pluginmanager/csimanager/fingerprint.go +++ b/client/pluginmanager/csimanager/fingerprint.go @@ -27,6 +27,15 @@ type pluginFingerprinter struct { fingerprintController bool hadFirstSuccessfulFingerprint bool + // hadFirstSuccessfulFingerprintCh is closed the first time a fingerprint + // is completed successfully. + hadFirstSuccessfulFingerprintCh chan struct{} + + // requiresStaging is set on a first successful fingerprint. It allows the + // csimanager to efficiently query this as it shouldn't change after a plugin + // is started. Removing this bool will require storing a cache of recent successful + // results that can be used by subscribers of the `hadFirstSuccessfulFingerprintCh`. 
+ requiresStaging bool } func (p *pluginFingerprinter) fingerprint(ctx context.Context) *structs.CSIInfo { @@ -61,7 +70,13 @@ func (p *pluginFingerprinter) fingerprint(ctx context.Context) *structs.CSIInfo info.HealthDescription = fmt.Sprintf("failed fingerprinting with error: %v", err) } else { info = fp - p.hadFirstSuccessfulFingerprint = true + if !p.hadFirstSuccessfulFingerprint { + p.hadFirstSuccessfulFingerprint = true + if p.fingerprintNode { + p.requiresStaging = info.NodeInfo.RequiresNodeStageVolume + } + close(p.hadFirstSuccessfulFingerprintCh) + } } return info diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index e6d96d663..736c76c56 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -2,7 +2,6 @@ package csimanager import ( "context" - "sync" "time" "github.com/hashicorp/go-hclog" @@ -31,9 +30,7 @@ type instanceManager struct { fp *pluginFingerprinter volumeManager *volumeManager - volumeManagerMu sync.RWMutex volumeManagerSetupCh chan struct{} - volumeManagerSetup bool client csi.CSIPlugin } @@ -47,10 +44,11 @@ func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *d updater: updater, fp: &pluginFingerprinter{ - logger: logger.Named("fingerprinter"), - info: p, - fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, - fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + logger: logger.Named("fingerprinter"), + info: p, + fingerprintNode: p.Type == dynamicplugins.PluginTypeCSINode, + fingerprintController: p.Type == dynamicplugins.PluginTypeCSIController, + hadFirstSuccessfulFingerprintCh: make(chan struct{}), }, mountPoint: p.Options["MountPoint"], @@ -73,30 +71,34 @@ func (i *instanceManager) run() { i.client = c i.fp.client = c + go i.setupVolumeManager() go i.runLoop() } +func (i *instanceManager) setupVolumeManager() { + if i.info.Type != dynamicplugins.PluginTypeCSINode { + i.logger.Debug("Skipping volume manager setup - not managing a Node plugin", "type", i.info.Type) + return + } + + select { + case <-i.shutdownCtx.Done(): + return + case <-i.fp.hadFirstSuccessfulFingerprintCh: + i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, i.fp.requiresStaging) + i.logger.Debug("Setup volume manager") + close(i.volumeManagerSetupCh) + return + } +} + // VolumeMounter returns the volume manager that is configured for the given plugin // instance. If called before the volume manager has been setup, it will block until // the volume manager is ready or the context is closed. func (i *instanceManager) VolumeMounter(ctx context.Context) (VolumeMounter, error) { - var vm VolumeMounter - i.volumeManagerMu.RLock() - if i.volumeManagerSetup { - vm = i.volumeManager - } - i.volumeManagerMu.RUnlock() - - if vm != nil { - return vm, nil - } - select { case <-i.volumeManagerSetupCh: - i.volumeManagerMu.RLock() - vm = i.volumeManager - i.volumeManagerMu.RUnlock() - return vm, nil + return i.volumeManager, nil case <-ctx.Done(): return nil, ctx.Err() } @@ -124,21 +126,6 @@ func (i *instanceManager) runLoop() { cancelFn() i.updater(i.info.Name, info) - // TODO: refactor this lock into a faster, goroutine-local check - i.volumeManagerMu.RLock() - // When we've had a successful fingerprint, and the volume manager is not yet setup, - // and one is required (we're running a node plugin), then set one up now. 
- if i.fp.hadFirstSuccessfulFingerprint && !i.volumeManagerSetup && i.fp.fingerprintNode { - i.volumeManagerMu.RUnlock() - i.volumeManagerMu.Lock() - i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, info.NodeInfo.RequiresNodeStageVolume) - i.volumeManagerSetup = true - close(i.volumeManagerSetupCh) - i.volumeManagerMu.Unlock() - } else { - i.volumeManagerMu.RUnlock() - } - timer.Reset(managerFingerprintInterval) } } From 3bff9fefae53840617ed476c180d2c2792aad36e Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 6 Feb 2020 15:26:29 +0100 Subject: [PATCH 065/126] csi: Provide plugin-scoped paths during RPCs When providing paths to plugins, the path needs to be in the scope of the plugins container, rather than that of the host. Here we enable that by providing the mount point through the plugin registration and then use it when constructing request target paths. --- .../taskrunner/plugin_supervisor_hook.go | 3 +- client/pluginmanager/csimanager/instance.go | 7 +- client/pluginmanager/csimanager/volume.go | 76 ++++++++++--------- .../pluginmanager/csimanager/volume_test.go | 14 ++-- 4 files changed, 57 insertions(+), 43 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 6c63c4c96..e66cb95e4 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -258,7 +258,8 @@ func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), err SocketPath: socketPath, }, Options: map[string]string{ - "MountPoint": h.mountPoint, + "MountPoint": h.mountPoint, + "ContainerMountPoint": h.task.CSIPluginConfig.MountDir, }, } } diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 736c76c56..bc6c63f0f 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -27,6 +27,10 @@ type instanceManager struct { // stored and where mount points will be created mountPoint string + // containerMountPoint is the location _inside_ the plugin container that the + // `mountPoint` is bound in to. + containerMountPoint string + fp *pluginFingerprinter volumeManager *volumeManager @@ -51,7 +55,8 @@ func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *d hadFirstSuccessfulFingerprintCh: make(chan struct{}), }, - mountPoint: p.Options["MountPoint"], + mountPoint: p.Options["MountPoint"], + containerMountPoint: p.Options["ContainerMountPoint"], volumeManagerSetupCh: make(chan struct{}), diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index f5a107ec1..2650e2fa0 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -40,27 +40,32 @@ type volumeManager struct { // e.g /opt/nomad.d/statedir/csi/my-csi-plugin/ mountRoot string + // containerMountPoint is the location _inside_ the plugin container that the + // `mountRoot` is bound in to. 
+ containerMountPoint string + // requiresStaging shows whether the plugin requires that the volume manager // calls NodeStageVolume and NodeUnstageVolume RPCs during setup and teardown requiresStaging bool } -func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir string, requiresStaging bool) *volumeManager { +func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir, containerRootDir string, requiresStaging bool) *volumeManager { return &volumeManager{ - logger: logger.Named("volume_manager"), - plugin: plugin, - mountRoot: rootDir, - requiresStaging: requiresStaging, - volumes: make(map[string]interface{}), + logger: logger.Named("volume_manager"), + plugin: plugin, + mountRoot: rootDir, + containerMountPoint: containerRootDir, + requiresStaging: requiresStaging, + volumes: make(map[string]interface{}), } } -func (v *volumeManager) stagingDirForVolume(vol *structs.CSIVolume) string { - return filepath.Join(v.mountRoot, StagingDirName, vol.ID, "todo-provide-usage-options") +func (v *volumeManager) stagingDirForVolume(root string, vol *structs.CSIVolume) string { + return filepath.Join(root, StagingDirName, vol.ID, "todo-provide-usage-options") } -func (v *volumeManager) allocDirForVolume(vol *structs.CSIVolume, alloc *structs.Allocation) string { - return filepath.Join(v.mountRoot, AllocSpecificDirName, alloc.ID, vol.ID, "todo-provide-usage-options") +func (v *volumeManager) allocDirForVolume(root string, vol *structs.CSIVolume, alloc *structs.Allocation) string { + return filepath.Join(root, AllocSpecificDirName, alloc.ID, vol.ID, "todo-provide-usage-options") } // ensureStagingDir attempts to create a directory for use when staging a volume @@ -70,7 +75,7 @@ func (v *volumeManager) allocDirForVolume(vol *structs.CSIVolume, alloc *structs // Returns whether the directory is a pre-existing mountpoint, the staging path, // and any errors that occurred. func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, error) { - stagingPath := v.stagingDirForVolume(vol) + stagingPath := v.stagingDirForVolume(v.mountRoot, vol) // Make the staging path, owned by the Nomad User if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) { @@ -95,7 +100,7 @@ func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, // Returns whether the directory is a pre-existing mountpoint, the publish path, // and any errors that occurred. func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation) (string, bool, error) { - allocPath := v.allocDirForVolume(vol, alloc) + allocPath := v.allocDirForVolume(v.mountRoot, vol, alloc) // Make the alloc path, owned by the Nomad User if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) { @@ -159,23 +164,25 @@ func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, erro // stageVolume prepares a volume for use by allocations. When a plugin exposes // the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a // given usage mode before the volume can be NodePublish-ed. 
-func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) (string, error) { +func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) error { logger := hclog.FromContext(ctx) logger.Trace("Preparing volume staging environment") - stagingPath, isMount, err := v.ensureStagingDir(vol) + hostStagingPath, isMount, err := v.ensureStagingDir(vol) if err != nil { - return "", err + return err } - logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "staging_path", stagingPath) + pluginStagingPath := v.stagingDirForVolume(v.containerMountPoint, vol) + + logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "host_staging_path", hostStagingPath, "plugin_staging_path", pluginStagingPath) if isMount { - logger.Debug("re-using existing staging mount for volume", "staging_path", stagingPath) - return stagingPath, nil + logger.Debug("re-using existing staging mount for volume", "staging_path", hostStagingPath) + return nil } capability, err := capabilitiesFromVolume(vol) if err != nil { - return "", err + return err } // We currently treat all explicit CSI NodeStageVolume errors (aside from timeouts, codes.ResourceExhausted, and codes.Unavailable) @@ -183,10 +190,10 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) // In the future, we can provide more useful error messages based on // different types of error. For error documentation see: // https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume-errors - return stagingPath, v.plugin.NodeStageVolume(ctx, + return v.plugin.NodeStageVolume(ctx, vol.ID, nil, /* TODO: Get publishContext from Server */ - stagingPath, + pluginStagingPath, capability, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), grpc_retry.WithMax(3), @@ -194,17 +201,22 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) ) } -func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, stagingPath string) (*MountInfo, error) { +func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { logger := hclog.FromContext(ctx) + var pluginStagingPath string + if v.requiresStaging { + pluginStagingPath = v.stagingDirForVolume(v.containerMountPoint, vol) + } - targetPath, isMount, err := v.ensureAllocDir(vol, alloc) + hostTargetPath, isMount, err := v.ensureAllocDir(vol, alloc) if err != nil { return nil, err } + pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc) if isMount { logger.Debug("Re-using existing published volume for allocation") - return &MountInfo{Source: targetPath}, nil + return &MountInfo{Source: hostTargetPath}, nil } capabilities, err := capabilitiesFromVolume(vol) @@ -215,8 +227,8 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ VolumeID: vol.ID, PublishContext: nil, // TODO: get publishcontext from server - StagingTargetPath: stagingPath, - TargetPath: targetPath, + StagingTargetPath: pluginStagingPath, + TargetPath: pluginTargetPath, VolumeCapability: capabilities, }, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), @@ -224,7 +236,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), ) - 
return &MountInfo{Source: targetPath}, err + return &MountInfo{Source: hostTargetPath}, err } // MountVolume performs the steps required for using a given volume @@ -235,17 +247,13 @@ func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) - var stagingPath string - var err error - if v.requiresStaging { - stagingPath, err = v.stageVolume(ctx, vol) - if err != nil { + if err := v.stageVolume(ctx, vol); err != nil { return nil, err } } - return v.publishVolume(ctx, vol, alloc, stagingPath) + return v.publishVolume(ctx, vol, alloc) } // unstageVolume is the inverse operation of `stageVolume` and must be called @@ -255,7 +263,7 @@ func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolume) error { logger := hclog.FromContext(ctx) logger.Trace("Unstaging volume") - stagingPath := v.stagingDirForVolume(vol) + stagingPath := v.stagingDirForVolume(v.containerMountPoint, vol) return v.plugin.NodeUnstageVolume(ctx, vol.ID, stagingPath, diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 10c828ca7..b5b74c142 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -74,8 +74,8 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { defer os.RemoveAll(tmpPath) csiFake := &csifake.Client{} - manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) - expectedStagingPath := manager.stagingDirForVolume(tc.Volume) + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) + expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume) if tc.CreateDirAheadOfTime { err := os.MkdirAll(expectedStagingPath, 0700) @@ -164,10 +164,10 @@ func TestVolumeManager_stageVolume(t *testing.T) { csiFake := &csifake.Client{} csiFake.NextNodeStageVolumeErr = tc.PluginErr - manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - _, err := manager.stageVolume(ctx, tc.Volume) + err := manager.stageVolume(ctx, tc.Volume) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -215,7 +215,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { csiFake := &csifake.Client{} csiFake.NextNodeUnstageVolumeErr = tc.PluginErr - manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() err := manager.unstageVolume(ctx, tc.Volume) @@ -275,10 +275,10 @@ func TestVolumeManager_publishVolume(t *testing.T) { csiFake := &csifake.Client{} csiFake.NextNodePublishVolumeErr = tc.PluginErr - manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, true) + manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, "") + _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) From f77d3813d14c8d3a97a1d753f9ab0c245fd0dde5 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 7 Feb 2020 11:44:57 +0100 Subject: [PATCH 066/126] csi: Fix broken call to 
newVolumeManager --- client/pluginmanager/csimanager/instance.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index bc6c63f0f..eef8a5b89 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -90,7 +90,7 @@ func (i *instanceManager) setupVolumeManager() { case <-i.shutdownCtx.Done(): return case <-i.fp.hadFirstSuccessfulFingerprintCh: - i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, i.fp.requiresStaging) + i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, i.containerMountPoint, i.fp.requiresStaging) i.logger.Debug("Setup volume manager") close(i.volumeManagerSetupCh) return From 60901fa764448863e286aae79638cd0a508beff3 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 10 Feb 2020 11:45:06 -0500 Subject: [PATCH 067/126] csi: implement CSI controller detach request/response (#7107) This changeset implements the minimal structs on the client-side we need to compile the work-in-progress implementation of the server-to-controller RPCs. It doesn't include implementing the `ClientCSI.DettachVolume` RPC on the client. --- client/structs/csi.go | 25 +++++++++++++++ plugins/csi/client.go | 30 +++++++++++++++--- plugins/csi/client_test.go | 62 ++++++++++++++++++++++++++++++++++++-- plugins/csi/fake/client.go | 14 +++++++++ plugins/csi/plugin.go | 57 ++++++++++++++++++++++++++++++++++- 5 files changed, 180 insertions(+), 8 deletions(-) diff --git a/client/structs/csi.go b/client/structs/csi.go index d08fecfe0..9616d1b78 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -75,3 +75,28 @@ type ClientCSIControllerAttachVolumeResponse struct { // subsequent `NodeStageVolume` or `NodePublishVolume` calls PublishContext map[string]string } + +type ClientCSIControllerDetachVolumeRequest struct { + PluginName string + + // The ID of the volume to be unpublished for the node + // This field is REQUIRED. + VolumeID string + + // The ID of the node. This field is REQUIRED. This must match the NodeID that + // is fingerprinted by the target node for this plugin name. 
+ NodeID string +} + +func (c *ClientCSIControllerDetachVolumeRequest) ToCSIRequest() *csi.ControllerUnpublishVolumeRequest { + if c == nil { + return &csi.ControllerUnpublishVolumeRequest{} + } + + return &csi.ControllerUnpublishVolumeRequest{ + VolumeID: c.VolumeID, + NodeID: c.NodeID, + } +} + +type ClientCSIControllerDetachVolumeResponse struct{} diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 16e9f485e..fe982d3f2 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -237,13 +237,12 @@ func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPub return nil, fmt.Errorf("controllerClient not initialized") } - pbrequest := &csipbv1.ControllerPublishVolumeRequest{ - VolumeId: req.VolumeID, - NodeId: req.NodeID, - Readonly: req.ReadOnly, - //TODO: add capabilities + err := req.Validate() + if err != nil { + return nil, err } + pbrequest := req.ToCSIRepresentation() resp, err := c.controllerClient.ControllerPublishVolume(ctx, pbrequest) if err != nil { return nil, err @@ -254,6 +253,27 @@ func (c *client) ControllerPublishVolume(ctx context.Context, req *ControllerPub }, nil } +func (c *client) ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) { + if c == nil { + return nil, fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return nil, fmt.Errorf("controllerClient not initialized") + } + err := req.Validate() + if err != nil { + return nil, err + } + + upbrequest := req.ToCSIRepresentation() + _, err = c.controllerClient.ControllerUnpublishVolume(ctx, upbrequest) + if err != nil { + return nil, err + } + + return &ControllerUnpublishVolumeResponse{}, nil +} + // // Node Endpoints // diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index d458c58c5..5d997722e 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -360,6 +360,7 @@ func TestClient_RPC_NodeGetCapabilities(t *testing.T) { func TestClient_RPC_ControllerPublishVolume(t *testing.T) { cases := []struct { Name string + Request *ControllerPublishVolumeRequest ResponseErr error Response *csipbv1.ControllerPublishVolumeResponse ExpectedResponse *ControllerPublishVolumeResponse @@ -367,16 +368,26 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { }{ { Name: "handles underlying grpc errors", + Request: &ControllerPublishVolumeRequest{}, ResponseErr: fmt.Errorf("some grpc error"), ExpectedErr: fmt.Errorf("some grpc error"), }, + { + Name: "Handles missing NodeID", + Request: &ControllerPublishVolumeRequest{}, + Response: &csipbv1.ControllerPublishVolumeResponse{}, + ExpectedErr: fmt.Errorf("missing NodeID"), + }, + { Name: "Handles PublishContext == nil", + Request: &ControllerPublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, Response: &csipbv1.ControllerPublishVolumeResponse{}, ExpectedResponse: &ControllerPublishVolumeResponse{}, }, { - Name: "Handles PublishContext != nil", + Name: "Handles PublishContext != nil", + Request: &ControllerPublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, Response: &csipbv1.ControllerPublishVolumeResponse{ PublishContext: map[string]string{ "com.hashicorp/nomad-node-id": "foobar", @@ -400,7 +411,54 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { cc.NextErr = c.ResponseErr cc.NextPublishVolumeResponse = c.Response - resp, err := client.ControllerPublishVolume(context.TODO(), &ControllerPublishVolumeRequest{}) + resp, err := client.ControllerPublishVolume(context.TODO(), c.Request) 
+ if c.ExpectedErr != nil { + require.Error(t, c.ExpectedErr, err) + } + + require.Equal(t, c.ExpectedResponse, resp) + }) + } +} + +func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { + cases := []struct { + Name string + Request *ControllerUnpublishVolumeRequest + ResponseErr error + Response *csipbv1.ControllerUnpublishVolumeResponse + ExpectedResponse *ControllerUnpublishVolumeResponse + ExpectedErr error + }{ + { + Name: "Handles underlying grpc errors", + Request: &ControllerUnpublishVolumeRequest{}, + ResponseErr: fmt.Errorf("some grpc error"), + ExpectedErr: fmt.Errorf("some grpc error"), + }, + { + Name: "Handles missing NodeID", + Request: &ControllerUnpublishVolumeRequest{}, + ExpectedErr: fmt.Errorf("missing NodeID"), + ExpectedResponse: nil, + }, + { + Name: "Handles successful response", + Request: &ControllerUnpublishVolumeRequest{VolumeID: "vol", NodeID: "node"}, + ExpectedErr: fmt.Errorf("missing NodeID"), + ExpectedResponse: &ControllerUnpublishVolumeResponse{}, + }, + } + + for _, c := range cases { + t.Run(c.Name, func(t *testing.T) { + _, cc, _, client := newTestClient() + defer client.Close() + + cc.NextErr = c.ResponseErr + cc.NextUnpublishVolumeResponse = c.Response + + resp, err := client.ControllerUnpublishVolume(context.TODO(), c.Request) if c.ExpectedErr != nil { require.Error(t, c.ExpectedErr, err) } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 22eecc55e..ea7513b8f 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -44,6 +44,10 @@ type Client struct { NextControllerPublishVolumeErr error ControllerPublishVolumeCallCount int64 + NextControllerUnpublishVolumeResponse *csi.ControllerUnpublishVolumeResponse + NextControllerUnpublishVolumeErr error + ControllerUnpublishVolumeCallCount int64 + NextNodeGetCapabilitiesResponse *csi.NodeCapabilitySet NextNodeGetCapabilitiesErr error NodeGetCapabilitiesCallCount int64 @@ -139,6 +143,16 @@ func (c *Client) ControllerPublishVolume(ctx context.Context, req *csi.Controlle return c.NextControllerPublishVolumeResponse, c.NextControllerPublishVolumeErr } +// ControllerUnpublishVolume is used to attach a remote volume to a node +func (c *Client) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerUnpublishVolumeCallCount++ + + return c.NextControllerUnpublishVolumeResponse, c.NextControllerUnpublishVolumeErr +} + func (c *Client) NodeGetCapabilities(ctx context.Context) (*csi.NodeCapabilitySet, error) { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index c39990c05..56dc90b63 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -36,6 +36,9 @@ type CSIPlugin interface { // ControllerPublishVolume is used to attach a remote volume to a cluster node. ControllerPublishVolume(ctx context.Context, req *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + // ControllerUnpublishVolume is used to deattach a remote volume from a cluster node. + ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + // NodeGetCapabilities is used to return the available capabilities from the // Node Service. 
NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) @@ -223,14 +226,66 @@ type ControllerPublishVolumeRequest struct { VolumeID string NodeID string ReadOnly bool - //TODO: Add Capabilities } +func (r *ControllerPublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerPublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.ControllerPublishVolumeRequest{ + VolumeId: r.VolumeID, + NodeId: r.NodeID, + Readonly: r.ReadOnly, + // TODO: add capabilities + } +} + +func (r *ControllerPublishVolumeRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + if r.NodeID == "" { + return errors.New("missing NodeID") + } + return nil +} + type ControllerPublishVolumeResponse struct { PublishContext map[string]string } +type ControllerUnpublishVolumeRequest struct { + VolumeID string + NodeID string +} + +func (r *ControllerUnpublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerUnpublishVolumeRequest { + if r == nil { + return nil + } + + return &csipbv1.ControllerUnpublishVolumeRequest{ + VolumeId: r.VolumeID, + NodeId: r.NodeID, + } +} + +func (r *ControllerUnpublishVolumeRequest) Validate() error { + if r.VolumeID == "" { + return errors.New("missing VolumeID") + } + if r.NodeID == "" { + // the spec allows this but it would unpublish the + // volume from all nodes + return errors.New("missing NodeID") + } + return nil +} + +type ControllerUnpublishVolumeResponse struct{} + type NodeCapabilitySet struct { HasStageUnstageVolume bool } From 01c704ab9d117e337dd048258ebf285bf82a1dd0 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 10 Feb 2020 16:32:40 -0500 Subject: [PATCH 068/126] csi: add PublishContext to CSIVolumeClaimResponse (#7113) The `ControllerPublishVolumeResponse` CSI RPC includes the publish context intended to be passed by the orchestrator as an opaque value to the node plugins. This changeset adds it to our response to a volume claim request to proxy the controller's response back to the client node. --- nomad/structs/csi.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 6f8dbc6d9..7e667c0a9 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -431,6 +431,20 @@ type CSIVolumeClaimRequest struct { } type CSIVolumeClaimResponse struct { + // Opaque static publish properties of the volume. SP MAY use this + // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume` + // calls calls have contextual information. + // The contents of this field SHALL be opaque to nomad. + // The contents of this field SHALL NOT be mutable. + // The contents of this field SHALL be safe for the nomad to cache. + // The contents of this field SHOULD NOT contain sensitive + // information. + // The contents of this field SHOULD NOT be used for uniquely + // identifying a volume. The `volume_id` alone SHOULD be sufficient to + // identify the volume. + // This field is OPTIONAL and when present MUST be passed to + // `NodeStageVolume` or `NodePublishVolume` calls on the client + PublishContext map[string]string QueryMeta } From e56c677221fd96669d23c414d5b4494b5ed6c18c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 7 Feb 2020 10:21:26 +0100 Subject: [PATCH 069/126] sched/feasible: CSI - Filter applicable volumes This commit filters the jobs volumes when setting them on the feasibility checker. This ensures that the rest of the checker does not have to worry about non-csi volumes. 
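For illustration only, not part of the patch: a minimal sketch of the filtering idea described in the commit message above, assuming only the structs.VolumeRequest type and the VolumeTypeCSI/VolumeTypeHost constants that already appear in these diffs. It mirrors what the CSIVolumeChecker.SetVolumes change below does, so that later feasibility checks never see host volumes.

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/nomad/structs"
)

// filterCSIVolumes keeps only the CSI volume requests from a task
// group's volume map; host volumes are dropped.
func filterCSIVolumes(volumes map[string]*structs.VolumeRequest) map[string]*structs.VolumeRequest {
    xs := make(map[string]*structs.VolumeRequest)
    for alias, req := range volumes {
        if req.Type != structs.VolumeTypeCSI {
            continue
        }
        xs[alias] = req
    }
    return xs
}

func main() {
    got := filterCSIVolumes(map[string]*structs.VolumeRequest{
        "baz":      {Type: structs.VolumeTypeCSI, Name: "baz", Source: "volume-id"},
        "nonsense": {Type: structs.VolumeTypeHost, Name: "nonsense", Source: "my-host-volume"},
    })
    fmt.Println(len(got)) // 1: only the CSI request survives
}
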
--- scheduler/feasible.go | 12 ++++++++++-- scheduler/feasible_test.go | 5 +++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/scheduler/feasible.go b/scheduler/feasible.go index b92eec137..f2ddc6888 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -125,7 +125,6 @@ func NewHostVolumeChecker(ctx Context) *HostVolumeChecker { // SetVolumes takes the volumes required by a task group and updates the checker. func (h *HostVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { lookupMap := make(map[string][]*structs.VolumeRequest) - // Convert the map from map[DesiredName]Request to map[Source][]Request to improve // lookup performance. Also filter non-host volumes. for _, req := range volumes { @@ -197,7 +196,16 @@ func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker { } func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { - c.volumes = volumes + xs := make(map[string]*structs.VolumeRequest) + // Filter to only CSI Volumes + for alias, req := range volumes { + if req.Type != structs.VolumeTypeCSI { + continue + } + + xs[alias] = req + } + c.volumes = xs } func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool { diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index f5c176526..dd2c48279 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -298,6 +298,11 @@ func TestCSIVolumeChecker(t *testing.T) { Name: "foo", Source: "volume-id", }, + "nonsense": { + Type: "host", + Name: "nonsense", + Source: "my-host-volume", + }, } checker := NewCSIVolumeChecker(ctx) From 15c6c05ccf36a1ee998615966e3eb6c2e9d46384 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 7 Feb 2020 10:21:56 +0100 Subject: [PATCH 070/126] api: Parse CSI Volumes Previously when deserializing volumes we skipped over volumes that were not of type `host`. This commit ensures that we parse both host and csi volumes correctly. --- command/agent/job_endpoint.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index 0483c018f..edba2a39c 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -749,8 +749,9 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { if l := len(taskGroup.Volumes); l != 0 { tg.Volumes = make(map[string]*structs.VolumeRequest, l) for k, v := range taskGroup.Volumes { - if v.Type != structs.VolumeTypeHost { - // Ignore non-host volumes in this iteration currently. + if v.Type != structs.VolumeTypeHost && v.Type != structs.VolumeTypeCSI { + // Ignore volumes we don't understand in this iteration currently. + // - This is because we don't currently have a way to return errors here. 
continue } From 22e8317a53d83190733ed6782b636f424ab297cf Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 7 Feb 2020 14:18:21 +0100 Subject: [PATCH 071/126] csi: Disable validation of volume topology --- nomad/structs/csi.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 7e667c0a9..fecdc42d8 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -379,16 +379,18 @@ func (v *CSIVolume) Validate() error { errs = append(errs, "missing attachment mode") } - var ok bool - for _, t := range v.Topologies { - if t != nil && len(t.Segments) > 0 { - ok = true - break - } - } - if !ok { - errs = append(errs, "missing topology") - } + // TODO: Volume Topologies are optional - We should check to see if the plugin + // the volume is being registered with requires them. + // var ok bool + // for _, t := range v.Topologies { + // if t != nil && len(t.Segments) > 0 { + // ok = true + // break + // } + // } + // if !ok { + // errs = append(errs, "missing topology") + // } if len(errs) > 0 { return fmt.Errorf("validation: %s", strings.Join(errs, ", ")) From a2e01c43698aad64c18a9fa7aa3b785ff61f7b61 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 7 Feb 2020 15:38:02 +0100 Subject: [PATCH 072/126] sched/feasible: Validate CSIVolume's correctly Previously we were looking up plugins based on the Alias Name for a CSI Volume within the context of its task group. Here we first look up a volume based on its identifier and then validate the existence of the plugin based on its `PluginID`. --- scheduler/feasible.go | 13 ++++++------- scheduler/feasible_test.go | 4 ++-- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/scheduler/feasible.go b/scheduler/feasible.go index f2ddc6888..78a5a245f 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -230,20 +230,19 @@ func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) bool { ws := memdb.NewWatchSet() for _, req := range c.volumes { - // Check that this node has a healthy running plugin with the right PluginID - plugin, ok := n.CSINodePlugins[req.Name] - if !(ok && plugin.Healthy) { - return false - } - // Get the volume to check that it's healthy (there's a healthy controller // and the volume hasn't encountered an error or been marked for GC vol, err := c.ctx.State().CSIVolumeByID(ws, req.Source) - if err != nil || vol == nil { return false } + // Check that this node has a healthy running plugin with the right PluginID + plugin, ok := n.CSINodePlugins[vol.PluginID] + if !(ok && plugin.Healthy) { + return false + } + if (req.ReadOnly && !vol.CanReadOnly()) || !vol.CanWrite() { return false diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index dd2c48279..e5b49745f 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -293,9 +293,9 @@ func TestCSIVolumeChecker(t *testing.T) { noVolumes := map[string]*structs.VolumeRequest{} volumes := map[string]*structs.VolumeRequest{ - "foo": { + "baz": { Type: "csi", - Name: "foo", + Name: "baz", Source: "volume-id", }, "nonsense": { From 77bcaa8183f0b726843ffd4fff483546d4a5e3af Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 11 Feb 2020 12:41:18 +0100 Subject: [PATCH 073/126] csi_endpoint: Support No ACLs and restrict Nodes This commit refactors the ACL code for the CSI endpoint to support environments that run without acls enabled (e.g developer environments) and also provides an easy way to restrict which endpoints may be 
accessed with a client's SecretID to limit the blast radius of a malicious client on the state of the environment. --- nomad/csi_endpoint.go | 62 +++++++++++++++++++++++--------------- nomad/csi_endpoint_test.go | 43 ++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 25 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index d96326f50..b363219c5 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -1,7 +1,6 @@ package nomad import ( - "fmt" "time" metrics "github.com/armon/go-metrics" @@ -21,20 +20,24 @@ type CSIVolume struct { // QueryACLObj looks up the ACL token in the request and returns the acl.ACL object // - fallback to node secret ids -func (srv *Server) QueryACLObj(args *structs.QueryOptions) (*acl.ACL, error) { - if args.AuthToken == "" { - return nil, fmt.Errorf("authorization required") - } - +func (srv *Server) QueryACLObj(args *structs.QueryOptions, allowNodeAccess bool) (*acl.ACL, error) { // Lookup the token aclObj, err := srv.ResolveToken(args.AuthToken) if err != nil { // If ResolveToken had an unexpected error return that - return nil, err - } + if !structs.IsErrTokenNotFound(err) { + return nil, err + } + + // If we don't allow access to this endpoint from Nodes, then return token + // not found. + if !allowNodeAccess { + return nil, structs.ErrTokenNotFound + } - if aclObj == nil { ws := memdb.NewWatchSet() + // Attempt to lookup AuthToken as a Node.SecretID since nodes may call + // call this endpoint and don't have an ACL token. node, stateErr := srv.fsm.State().NodeBySecretID(ws, args.AuthToken) if stateErr != nil { // Return the original ResolveToken error with this err @@ -43,22 +46,24 @@ func (srv *Server) QueryACLObj(args *structs.QueryOptions) (*acl.ACL, error) { return nil, merr.ErrorOrNil() } + // We did not find a Node for this ID, so return Token Not Found. if node == nil { return nil, structs.ErrTokenNotFound } } + // Return either the users aclObj, or nil if ACLs are disabled. return aclObj, nil } // WriteACLObj calls QueryACLObj for a WriteRequest -func (srv *Server) WriteACLObj(args *structs.WriteRequest) (*acl.ACL, error) { +func (srv *Server) WriteACLObj(args *structs.WriteRequest, allowNodeAccess bool) (*acl.ACL, error) { opts := &structs.QueryOptions{ Region: args.RequestRegion(), Namespace: args.RequestNamespace(), AuthToken: args.AuthToken, } - return srv.QueryACLObj(opts) + return srv.QueryACLObj(opts, allowNodeAccess) } const ( @@ -87,7 +92,8 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV return err } - aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } @@ -138,7 +144,7 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV // Cache ACL checks QUESTION: are they expensive? 
allowed, ok := cache[vol.Namespace] if !ok { - allowed = aclObj.AllowNsOp(vol.Namespace, acl.NamespaceCapabilityCSIAccess) + allowed = allowCSIAccess(aclObj, vol.Namespace) cache[vol.Namespace] = allowed } @@ -158,12 +164,13 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol return err } - aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, true) if err != nil { return err } - if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + if !allowCSIAccess(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } @@ -198,7 +205,8 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return err } - aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + allowCSIVolumeManagement := acl.NamespaceValidator(acl.NamespaceCapabilityCSICreateVolume) + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false) if err != nil { return err } @@ -206,7 +214,7 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "register"}, metricsStart) - if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSICreateVolume) { + if !allowCSIVolumeManagement(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } @@ -238,7 +246,8 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return err } - aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + allowCSIVolumeManagement := acl.NamespaceValidator(acl.NamespaceCapabilityCSICreateVolume) + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false) if err != nil { return err } @@ -247,7 +256,7 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * defer metrics.MeasureSince([]string{"nomad", "volume", "deregister"}, metricsStart) ns := args.RequestNamespace() - if !aclObj.AllowNsOp(ns, acl.NamespaceCapabilityCSICreateVolume) { + if !allowCSIVolumeManagement(aclObj, ns) { return structs.ErrPermissionDenied } @@ -271,7 +280,8 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return err } - aclObj, err := v.srv.WriteACLObj(&args.WriteRequest) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, true) if err != nil { return err } @@ -279,7 +289,7 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "claim"}, metricsStart) - if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + if !allowCSIAccess(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } @@ -309,12 +319,13 @@ func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIP return err } - aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } - if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + if !allowCSIAccess(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } @@ -358,12 +369,13 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu return err } - 
aclObj, err := v.srv.QueryACLObj(&args.QueryOptions) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } - if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIAccess) { + if !allowCSIAccess(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 7c88a91d5..d46496cad 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -23,6 +23,49 @@ func TestCSIVolumeEndpoint_Get(t *testing.T) { ns := structs.DefaultNamespace + state := srv.fsm.State() + + codec := rpcClient(t, srv) + + id0 := uuid.Generate() + + // Create the volume + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: ns, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + PluginID: "minnie", + }} + err := state.CSIVolumeRegister(999, vols) + require.NoError(t, err) + + // Create the register request + req := &structs.CSIVolumeGetRequest{ + ID: id0, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: ns, + }, + } + + var resp structs.CSIVolumeGetResponse + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp) + require.NoError(t, err) + require.Equal(t, uint64(999), resp.Index) + require.Equal(t, vols[0].ID, resp.Volume.ID) +} + +func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) srv.config.ACLEnabled = true From b9b315f8d12db4c56884e45dccc8031cc0123e89 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 11 Feb 2020 10:23:55 -0500 Subject: [PATCH 074/126] csi: stub methods for server-to-controller RPC calls (#7117) --- nomad/csi_endpoint.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index b363219c5..9e11753b0 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -406,3 +406,17 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu }} return v.srv.blockingRPC(&opts) } + +// controllerPublishVolume sends publish request to the CSI controller +// plugin associated with a volume, if any. +func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, resp *structs.CSIVolumeClaimResponse) error { + // TODO(tgross): implement me! + return nil +} + +// controllerUnpublishVolume sends an unpublish request to the CSI +// controller plugin associated with a volume, if any. +func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, nodeID string) error { + // TODO(tgross): implement me! + return nil +} From b03b78b21296507d8bf0447f133c4276edcaf108 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 13 Feb 2020 10:18:55 -0500 Subject: [PATCH 075/126] csi: server-to-controller publish/unpublish RPCs (#7124) Nomad servers need to make requests to CSI controller plugins running on a client for publish/unpublish. The RPC needs to look up the client node based on the plugin, load balancing across controllers, and then perform the required client RPC to that node (via server forwarding if neccessary). 
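An illustrative sketch, not part of the patch, of the controller selection described above: shuffle the candidate controller client IDs so that long-blocking controller RPCs are spread across nodes, then take the first reachable one. The reachable callback here is hypothetical and stands in for the state-snapshot lookup (getNodeForRpc) used by the real nodeForControllerPlugin in the diff below.

package main

import "math/rand"

// pickControllerNode returns a randomly chosen candidate that passes
// the reachability check, or false if none does.
func pickControllerNode(clientIDs []string, reachable func(string) bool) (string, bool) {
    // Start from a zero-length slice when filling with append so the
    // shuffle permutes real IDs rather than leading empty entries.
    ids := make([]string, 0, len(clientIDs))
    ids = append(ids, clientIDs...)
    rand.Shuffle(len(ids), func(i, j int) { ids[i], ids[j] = ids[j], ids[i] })
    for _, id := range ids {
        if reachable(id) {
            return id, true
        }
    }
    return "", false
}

func main() {
    _, _ = pickControllerNode([]string{"node-1", "node-2"}, func(string) bool { return true })
}

One detail worth noting for the implementation that follows: when the candidate slice is populated with append, it should be created with make([]string, 0, count) rather than make([]string, count), otherwise the slice gains count leading empty strings and the shuffle only permutes those.
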
--- nomad/csi_endpoint.go | 145 +++++++++++++++++++++++++- nomad/csi_endpoint_test.go | 203 +++++++++++++++++++++++++++++++------ nomad/structs/csi.go | 4 +- nomad/structs/node.go | 14 +++ 4 files changed, 329 insertions(+), 37 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 9e11753b0..ff4c050ba 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -1,6 +1,8 @@ package nomad import ( + "fmt" + "math/rand" "time" metrics "github.com/armon/go-metrics" @@ -8,6 +10,7 @@ import ( memdb "github.com/hashicorp/go-memdb" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/acl" + cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -185,7 +188,6 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol if err != nil { return err } - if vol != nil { vol, err = state.CSIVolumeDenormalize(ws, vol) } @@ -293,6 +295,12 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return structs.ErrPermissionDenied } + // adds a PublishContext from the controller (if any) to the reply + err = v.srv.controllerPublishVolume(args, reply) + if err != nil { + return err + } + resp, index, err := v.srv.raftApply(structs.CSIVolumeClaimRequestType, args) if err != nil { v.logger.Error("csi raft apply failed", "error", err, "method", "claim") @@ -410,13 +418,144 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu // controllerPublishVolume sends publish request to the CSI controller // plugin associated with a volume, if any. func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, resp *structs.CSIVolumeClaimResponse) error { - // TODO(tgross): implement me! + plug, vol, err := srv.volAndPluginLookup(req.VolumeID) + if plug == nil || vol == nil || err != nil { + return err // possibly nil if no controller required + } + + method := "ClientCSI.AttachVolume" + cReq := &cstructs.ClientCSIControllerAttachVolumeRequest{ + PluginName: plug.ID, + VolumeID: req.VolumeID, + NodeID: req.Allocation.NodeID, + AttachmentMode: vol.AttachmentMode, + AccessMode: vol.AccessMode, + ReadOnly: req.Claim == structs.CSIVolumeClaimRead, + // TODO(tgross): we don't have a way of setting these yet. + // ref https://github.com/hashicorp/nomad/issues/7007 + // MountOptions: vol.MountOptions, + } + cResp := &cstructs.ClientCSIControllerAttachVolumeResponse{} + + // CSI controller plugins can block for arbitrarily long times, + // but we need to make sure it completes before we can safely + // mark the volume as claimed and return to the client so it + // can do a `NodePublish`. + err = srv.csiControllerRPC(plug, method, cReq, cResp) + if err != nil { + return err + } + resp.PublishContext = cResp.PublishContext return nil } // controllerUnpublishVolume sends an unpublish request to the CSI // controller plugin associated with a volume, if any. func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, nodeID string) error { - // TODO(tgross): implement me! 
+ plug, vol, err := srv.volAndPluginLookup(req.VolumeID) + if plug == nil || vol == nil || err != nil { + return err // possibly nil if no controller required + } + + method := "ClientCSI.DetachVolume" + cReq := &cstructs.ClientCSIControllerDetachVolumeRequest{ + PluginName: plug.ID, + VolumeID: req.VolumeID, + NodeID: nodeID, + } + err = srv.csiControllerRPC(plug, method, cReq, + &cstructs.ClientCSIControllerDetachVolumeResponse{}) + if err != nil { + return err + } return nil } + +func (srv *Server) volAndPluginLookup(volID string) (*structs.CSIPlugin, *structs.CSIVolume, error) { + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + vol, err := state.CSIVolumeByID(ws, volID) + if err != nil { + return nil, nil, err + } + if vol == nil { + return nil, nil, fmt.Errorf("volume not found: %s", volID) + } + if !vol.ControllerRequired { + return nil, nil, nil + } + + // note: we do this same lookup in CSIVolumeByID but then throw + // away the pointer to the plugin rather than attaching it to + // the volume so we have to do it again here. + plug, err := state.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return nil, nil, err + } + if plug == nil { + return nil, nil, fmt.Errorf("plugin not found: %s", vol.PluginID) + } + return plug, vol, nil +} + +func (srv *Server) csiControllerRPC(plugin *structs.CSIPlugin, method string, args, reply interface{}) error { + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plugin) + if err != nil || nodeID == "" { + return err + } + err = findNodeConnAndForward(srv, nodeID, method, args, reply) + if err != nil { + return err + } + if replyErr, ok := reply.(error); ok { + return replyErr + } + return nil +} + +// nodeForControllerPlugin returns the node ID for a random controller +// to load-balance long-blocking RPCs across client nodes. +func (srv *Server) nodeForControllerPlugin(plugin *structs.CSIPlugin) (string, error) { + count := len(plugin.Controllers) + if count == 0 { + return "", fmt.Errorf("no controllers available for plugin %q", plugin.ID) + } + snap, err := srv.fsm.State().Snapshot() + if err != nil { + return "", err + } + + // iterating maps is "random" but unspecified and isn't particularly + // random with small maps, so not well-suited for load balancing. + // so we shuffle the keys and iterate over them. + clientIDs := make([]string, count) + for clientID := range plugin.Controllers { + clientIDs = append(clientIDs, clientID) + } + rand.Shuffle(count, func(i, j int) { + clientIDs[i], clientIDs[j] = clientIDs[j], clientIDs[i] + }) + + for _, clientID := range clientIDs { + controller := plugin.Controllers[clientID] + if !controller.IsController() { + // we don't have separate types for CSIInfo depending on + // whether it's a controller or node. 
this error shouldn't + // make it to production but is to aid developers during + // development + err = fmt.Errorf("plugin is not a controller") + continue + } + _, err = getNodeForRpc(snap, clientID) + if err != nil { + continue + } + return clientID, nil + } + + return "", err +} diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index d46496cad..1931d39c4 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -4,6 +4,7 @@ import ( "fmt" "testing" + memdb "github.com/hashicorp/go-memdb" msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper/uuid" @@ -197,6 +198,7 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { func TestCSIVolumeEndpoint_Claim(t *testing.T) { t.Parallel() srv, shutdown := TestServer(t, func(c *Config) { + c.ACLEnabled = true c.NumSchedulers = 0 // Prevent automatic dequeue }) defer shutdown() @@ -205,7 +207,6 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { ns := structs.DefaultNamespace state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) - srv.config.ACLEnabled = true policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSICreateVolume, acl.NamespaceCapabilityCSIAccess}) accessToken := mock.CreatePolicyAndToken(t, state, 1001, @@ -230,7 +231,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id0), "expected 'volume not found' error because volume hasn't yet been created") - // Create a client nodes with a plugin + // Create a client node, plugin, and volume node := mock.Node() node.CSINodePlugins = map[string]*structs.CSIInfo{ "minnie": {PluginID: "minnie", @@ -238,13 +239,8 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { NodeInfo: &structs.CSINodeInfo{}, }, } - plugin := structs.NewCSIPlugin("minnie", 1) - plugin.ControllerRequired = false - plugin.AddPlugin(node.ID, &structs.CSIInfo{}) - err = state.UpsertNode(3, node) + err = state.UpsertNode(1002, node) require.NoError(t, err) - - // Create the volume for the plugin vols := []*structs.CSIVolume{{ ID: id0, Namespace: "notTheNamespace", @@ -255,24 +251,14 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { Segments: map[string]string{"foo": "bar"}, }}, }} + err = state.CSIVolumeRegister(1003, vols) - createToken := mock.CreatePolicyAndToken(t, state, 1001, - acl.NamespaceCapabilityCSICreateVolume, policy) - // Register the volume - volReq := &structs.CSIVolumeRegisterRequest{ - Volumes: vols, - WriteRequest: structs.WriteRequest{ - Region: "global", - Namespace: ns, - AuthToken: createToken.SecretID, - }, - } - volResp := &structs.CSIVolumeRegisterResponse{} - err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", volReq, volResp) + // Now our claim should succeed + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.NoError(t, err) - // Verify we can get the volume back out - getToken := mock.CreatePolicyAndToken(t, state, 1001, + // Verify the claim was set + getToken := mock.CreatePolicyAndToken(t, state, 1004, acl.NamespaceCapabilityCSIAccess, policy) volGetReq := &structs.CSIVolumeGetRequest{ ID: id0, @@ -285,15 +271,6 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) require.NoError(t, err) require.Equal(t, id0, volGetResp.Volume.ID) - - // Now our claim should succeed - err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) - 
require.NoError(t, err) - - // Verify the claim was set - err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp) - require.NoError(t, err) - require.Equal(t, id0, volGetResp.Volume.ID) require.Len(t, volGetResp.Volume.ReadAllocs, 0) require.Len(t, volGetResp.Volume.WriteAllocs, 1) @@ -325,6 +302,66 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { require.Len(t, volGetResp.Volume.WriteAllocs, 1) } +// TestCSIVolumeEndpoint_ClaimWithController exercises the VolumeClaim RPC +// when a controller is required. +func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.ACLEnabled = true + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + ns := structs.DefaultNamespace + state := srv.fsm.State() + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + policy := mock.NamespacePolicy(ns, "", + []string{acl.NamespaceCapabilityCSICreateVolume, acl.NamespaceCapabilityCSIAccess}) + accessToken := mock.CreatePolicyAndToken(t, state, 1001, + acl.NamespaceCapabilityCSIAccess, policy) + codec := rpcClient(t, srv) + id0 := uuid.Generate() + + // Create a client node, plugin, and volume + node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version + node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + NodeInfo: &structs.CSINodeInfo{}, + RequiresControllerPlugin: true, + }, + } + err := state.UpsertNode(1002, node) + require.NoError(t, err) + vols := []*structs.CSIVolume{{ + ID: id0, + Namespace: "notTheNamespace", + PluginID: "minnie", + ControllerRequired: true, + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + err = state.CSIVolumeRegister(1003, vols) + + // Make the volume claim + claimReq := &structs.CSIVolumeClaimRequest{ + VolumeID: id0, + Allocation: mock.BatchAlloc(), + Claim: structs.CSIVolumeClaimWrite, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: ns, + AuthToken: accessToken.SecretID, + }, + } + claimResp := &structs.CSIVolumeClaimResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + require.EqualError(t, err, "No path to node") +} + func TestCSIVolumeEndpoint_List(t *testing.T) { t.Parallel() srv, shutdown := TestServer(t, func(c *Config) { @@ -509,3 +546,105 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { require.NoError(t, err) require.Nil(t, resp2.Plugin) } + +func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { + srv, shutdown := TestServer(t, func(c *Config) {}) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + state := srv.fsm.State() + id0 := uuid.Generate() + id1 := uuid.Generate() + id2 := uuid.Generate() + + // Create a client node with a plugin + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", Healthy: true, RequiresControllerPlugin: true}, + "adam": {PluginID: "adam", Healthy: true}, + } + err := state.UpsertNode(3, node) + require.NoError(t, err) + + // Create 2 volumes + vols := []*structs.CSIVolume{ + { + ID: id0, + Namespace: "notTheNamespace", + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + ControllerRequired: true, + }, + { + ID: id1, 
+ Namespace: "notTheNamespace", + PluginID: "adam", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + ControllerRequired: false, + }, + } + err = state.CSIVolumeRegister(1002, vols) + require.NoError(t, err) + + // has controller + plugin, vol, err := srv.volAndPluginLookup(id0) + require.NotNil(t, plugin) + require.NotNil(t, vol) + require.NoError(t, err) + + // no controller + plugin, vol, err = srv.volAndPluginLookup(id1) + require.Nil(t, plugin) + require.Nil(t, vol) + require.NoError(t, err) + + // doesn't exist + plugin, vol, err = srv.volAndPluginLookup(id2) + require.Nil(t, plugin) + require.Nil(t, vol) + require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id2)) +} + +func TestCSI_NodeForControllerPlugin(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) {}) + testutil.WaitForLeader(t, srv.RPC) + defer shutdown() + + plugins := map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + NodeInfo: &structs.CSINodeInfo{}, + RequiresControllerPlugin: true, + }, + } + state := srv.fsm.State() + + node1 := mock.Node() + node1.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early versions + node1.CSIControllerPlugins = plugins + node2 := mock.Node() + node2.CSIControllerPlugins = plugins + node2.ID = uuid.Generate() + node3 := mock.Node() + node3.ID = uuid.Generate() + + err := state.UpsertNode(1002, node1) + require.NoError(t, err) + err = state.UpsertNode(1003, node2) + require.NoError(t, err) + err = state.UpsertNode(1004, node3) + require.NoError(t, err) + + ws := memdb.NewWatchSet() + + plugin, err := state.CSIPluginByID(ws, "minnie") + require.NoError(t, err) + nodeID, err := srv.nodeForControllerPlugin(plugin) + + // only node1 has both the controller and a recent Nomad version + require.Equal(t, nodeID, node1.ID) +} diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index fecdc42d8..3114bf6e2 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -481,9 +481,9 @@ type CSIPlugin struct { ControllerRequired bool ControllersHealthy int - Controllers map[string]*CSIInfo + Controllers map[string]*CSIInfo // map of client IDs to CSI Controllers NodesHealthy int - Nodes map[string]*CSIInfo + Nodes map[string]*CSIInfo // map of client IDs to CSI Nodes CreateIndex uint64 ModifyIndex uint64 diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 2b1c578b9..69d3f0c29 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -200,6 +200,20 @@ func (c *CSIInfo) Equal(o *CSIInfo) bool { return reflect.DeepEqual(nc, no) } +func (c *CSIInfo) IsController() bool { + if c == nil || c.ControllerInfo == nil { + return false + } + return true +} + +func (c *CSIInfo) IsNode() bool { + if c == nil || c.NodeInfo == nil { + return false + } + return true +} + // DriverInfo is the current state of a single driver. This is updated // regularly as driver health changes on the node. type DriverInfo struct { From 4a2492ecb145ee7af2dd459952c31d1863e64b91 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 11 Feb 2020 14:30:34 +0100 Subject: [PATCH 076/126] client: Pass an RPC Client to AllocRunners As part of introducing support for CSI, AllocRunner hooks need to be able to communicate with Nomad Servers for validation of and interaction with storage volumes. 
Here we create a small RPCer interface and pass the client (rpc client) to the AR in preparation for making these RPCs. --- client/allocrunner/alloc_runner.go | 10 ++++++++++ client/allocrunner/alloc_runner_hooks.go | 2 +- client/allocrunner/config.go | 4 ++++ client/allocrunner/csi_hook.go | 12 +++++++----- client/client.go | 2 ++ 5 files changed, 24 insertions(+), 6 deletions(-) diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index 4cdf4ee00..cd43318c1 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -158,6 +158,15 @@ type allocRunner struct { serversContactedCh chan struct{} taskHookCoordinator *taskHookCoordinator + + // rpcClient is the RPC Client that should be used by the allocrunner and its + // hooks to communicate with Nomad Servers. + rpcClient RPCer +} + +// RPCer is the interface needed by hooks to make RPC calls. +type RPCer interface { + RPC(method string, args interface{}, reply interface{}) error } // NewAllocRunner returns a new allocation runner. @@ -193,6 +202,7 @@ func NewAllocRunner(config *Config) (*allocRunner, error) { devicemanager: config.DeviceManager, driverManager: config.DriverManager, serversContactedCh: config.ServersContactedCh, + rpcClient: config.RPCClient, } // Create the logger based on the allocation ID diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 793ec530b..d37dd1dff 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -134,7 +134,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { logger: hookLogger, }), newConsulSockHook(hookLogger, alloc, ar.allocDir, config.ConsulConfig), - newCSIHook(hookLogger, alloc), + newCSIHook(hookLogger, alloc, ar.rpcClient), } return nil diff --git a/client/allocrunner/config.go b/client/allocrunner/config.go index 257d8db69..8eb013ede 100644 --- a/client/allocrunner/config.go +++ b/client/allocrunner/config.go @@ -68,4 +68,8 @@ type Config struct { // ServersContactedCh is closed when the first GetClientAllocs call to // servers succeeds and allocs are synced. ServersContactedCh chan struct{} + + // RPCClient is the RPC Client that should be used by the allocrunner and its + // hooks to communicate with Nomad Servers. + RPCClient RPCer } diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index cccd66397..edb427398 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -10,8 +10,9 @@ import ( // // It is a noop for allocs that do not depend on CSI Volumes. 
type csiHook struct { - alloc *structs.Allocation - logger hclog.Logger + alloc *structs.Allocation + logger hclog.Logger + rpcClient RPCer } func (c *csiHook) Name() string { @@ -27,10 +28,11 @@ func (c *csiHook) Prerun() error { return nil } -func newCSIHook(logger hclog.Logger, alloc *structs.Allocation) *csiHook { +func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer) *csiHook { return &csiHook{ - alloc: alloc, - logger: logger.Named("csi_hook"), + alloc: alloc, + logger: logger.Named("csi_hook"), + rpcClient: rpcClient, } } diff --git a/client/client.go b/client/client.go index fae2b2928..05ce5dd6a 100644 --- a/client/client.go +++ b/client/client.go @@ -1088,6 +1088,7 @@ func (c *Client) restoreState() error { DeviceManager: c.devicemanager, DriverManager: c.drivermanager, ServersContactedCh: c.serversContactedCh, + RPCClient: c, } c.configLock.RUnlock() @@ -2351,6 +2352,7 @@ func (c *Client) addAlloc(alloc *structs.Allocation, migrateToken string) error CSIManager: c.csimanager, DeviceManager: c.devicemanager, DriverManager: c.drivermanager, + RPCClient: c, } c.configLock.RUnlock() From 3ef41fbb868cabd58403aff1ac228ae95bd939b5 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 11 Feb 2020 14:45:16 +0100 Subject: [PATCH 077/126] csi_hook: Stage/Mount volumes as required This commit introduces the first stage of volume mounting for an allocation. The csimanager.VolumeMounter interface manages the blocking and actual minutia of the CSI implementation allowing this hook to do the minimal work of volume retrieval and creating mount info. In the future the `CSIVolume.Get` request should be replaced by `CSIVolume.Claim(Batch?)` to minimize the number of RPCs and to handle external triggering of a ControllerPublishVolume request as required. We also need to ensure that if pre-run hooks fail, we still get a full unwinding of any publish and staged volumes to ensure that there are no hanging references to volumes. That is not handled in this commit. --- client/allocrunner/alloc_runner_hooks.go | 2 +- client/allocrunner/csi_hook.go | 80 +++++++++++++++++++++--- 2 files changed, 73 insertions(+), 9 deletions(-) diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index d37dd1dff..06f9d381f 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -134,7 +134,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { logger: hookLogger, }), newConsulSockHook(hookLogger, alloc, ar.allocDir, config.ConsulConfig), - newCSIHook(hookLogger, alloc, ar.rpcClient), + newCSIHook(hookLogger, alloc, ar.rpcClient, ar.csiManager), } return nil diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index edb427398..de4f46da5 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -1,7 +1,11 @@ package allocrunner import ( + "context" + "fmt" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/nomad/structs" ) @@ -10,9 +14,10 @@ import ( // // It is a noop for allocs that do not depend on CSI Volumes. 
type csiHook struct { - alloc *structs.Allocation - logger hclog.Logger - rpcClient RPCer + alloc *structs.Allocation + logger hclog.Logger + csimanager csimanager.Manager + rpcClient RPCer } func (c *csiHook) Name() string { @@ -24,15 +29,74 @@ func (c *csiHook) Prerun() error { return nil } - // TODO: Volume attachment flow + ctx := context.TODO() + volumes, err := c.csiVolumesFromAlloc() + if err != nil { + return err + } + + mounts := make(map[string]*csimanager.MountInfo, len(volumes)) + for alias, volume := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, volume) + if err != nil { + return err + } + + mountInfo, err := mounter.MountVolume(ctx, volume, c.alloc) + if err != nil { + return err + } + + mounts[alias] = mountInfo + } + + // TODO: Propagate mounts back to the tasks. + return nil } -func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer) *csiHook { +// csiVolumesFromAlloc finds all the CSI Volume requests from the allocation's +// task group and then fetches them from the Nomad Server, before returning +// them in the form of map[RequestedAlias]*structs.CSIVolume. +// +// If any volume fails to validate then we return an error. +func (c *csiHook) csiVolumesFromAlloc() (map[string]*structs.CSIVolume, error) { + vols := make(map[string]*structs.VolumeRequest) + tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup) + for alias, vol := range tg.Volumes { + if vol.Type == structs.VolumeTypeCSI { + vols[alias] = vol + } + } + + csiVols := make(map[string]*structs.CSIVolume, len(vols)) + for alias, request := range vols { + req := &structs.CSIVolumeGetRequest{ + ID: request.Source, + } + req.Region = c.alloc.Job.Region + + var resp structs.CSIVolumeGetResponse + if err := c.rpcClient.RPC("CSIVolume.Get", req, &resp); err != nil { + return nil, err + } + + if resp.Volume == nil { + return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", request.Source) + } + + csiVols[alias] = resp.Volume + } + + return csiVols, nil +} + +func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer, csi csimanager.Manager) *csiHook { return &csiHook{ - alloc: alloc, - logger: logger.Named("csi_hook"), - rpcClient: rpcClient, + alloc: alloc, + logger: logger.Named("csi_hook"), + rpcClient: rpcClient, + csimanager: csi, } } From d8334cf884efb1495a4f207ee8afdd02a3356bbd Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 11 Feb 2020 17:39:16 +0100 Subject: [PATCH 078/126] allocrunner: Push state from hooks to taskrunners This commit is an initial (read: janky) approach to forwarding state from an allocrunner hook to a taskrunner using a similar `hookResources` approach that tr's use internally. It should eventually probably be replaced with something a little bit more message based, but for things that only come from pre-run hooks, and don't change, it's probably fine for now. --- client/allocrunner/alloc_runner.go | 4 +++ client/allocrunner/alloc_runner_hooks.go | 35 +++++++++++++++++++- client/allocrunner/csi_hook.go | 8 +++-- client/allocrunner/taskrunner/task_runner.go | 6 ++++ client/structs/allochook.go | 29 ++++++++++++++++ 5 files changed, 79 insertions(+), 3 deletions(-) create mode 100644 client/structs/allochook.go diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index cd43318c1..364f7b884 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -120,6 +120,10 @@ type allocRunner struct { // transistions. 
runnerHooks []interfaces.RunnerHook + // hookState is the output of allocrunner hooks + hookState *cstructs.AllocHookResources + hookStateMu sync.RWMutex + // tasks are the set of task runners tasks map[string]*taskrunner.TaskRunner diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 06f9d381f..73c5f35eb 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -7,11 +7,41 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocrunner/interfaces" clientconfig "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) +type hookResourceSetter interface { + GetAllocHookResources() *cstructs.AllocHookResources + SetAllocHookResources(*cstructs.AllocHookResources) +} + +type allocHookResourceSetter struct { + ar *allocRunner +} + +func (a *allocHookResourceSetter) GetAllocHookResources() *cstructs.AllocHookResources { + a.ar.hookStateMu.RLock() + defer a.ar.hookStateMu.RUnlock() + + return a.ar.hookState +} + +func (a *allocHookResourceSetter) SetAllocHookResources(res *cstructs.AllocHookResources) { + a.ar.hookStateMu.Lock() + defer a.ar.hookStateMu.Unlock() + + a.ar.hookState = res + + // Propagate to all of the TRs within the lock to ensure consistent state. + // TODO: Refactor so TR's pull state from AR? + for _, tr := range a.ar.tasks { + tr.SetAllocHookResources(res) + } +} + type networkIsolationSetter interface { SetNetworkIsolation(*drivers.NetworkIsolationSpec) } @@ -105,6 +135,9 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { // create network isolation setting shim ns := &allocNetworkIsolationSetter{ar: ar} + // create hook resource setting shim + hrs := &allocHookResourceSetter{ar: ar} + // build the network manager nm, err := newNetworkManager(ar.Alloc(), ar.driverManager) if err != nil { @@ -134,7 +167,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { logger: hookLogger, }), newConsulSockHook(hookLogger, alloc, ar.allocDir, config.ConsulConfig), - newCSIHook(hookLogger, alloc, ar.rpcClient, ar.csiManager), + newCSIHook(hookLogger, alloc, ar.rpcClient, ar.csiManager, hrs), } return nil diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index de4f46da5..aa281d2cc 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -18,6 +18,7 @@ type csiHook struct { logger hclog.Logger csimanager csimanager.Manager rpcClient RPCer + updater hookResourceSetter } func (c *csiHook) Name() string { @@ -50,7 +51,9 @@ func (c *csiHook) Prerun() error { mounts[alias] = mountInfo } - // TODO: Propagate mounts back to the tasks. 
+ res := c.updater.GetAllocHookResources() + res.CSIMounts = mounts + c.updater.SetAllocHookResources(res) return nil } @@ -91,12 +94,13 @@ func (c *csiHook) csiVolumesFromAlloc() (map[string]*structs.CSIVolume, error) { return csiVols, nil } -func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer, csi csimanager.Manager) *csiHook { +func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer, csi csimanager.Manager, updater hookResourceSetter) *csiHook { return &csiHook{ alloc: alloc, logger: logger.Named("csi_hook"), rpcClient: rpcClient, csimanager: csi, + updater: updater, } } diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index d33d740f3..e8a054e4c 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -220,6 +220,8 @@ type TaskRunner struct { networkIsolationLock sync.Mutex networkIsolationSpec *drivers.NetworkIsolationSpec + + allocHookResources *cstructs.AllocHookResources } type Config struct { @@ -1408,3 +1410,7 @@ func (tr *TaskRunner) TaskExecHandler() drivermanager.TaskExecHandler { func (tr *TaskRunner) DriverCapabilities() (*drivers.Capabilities, error) { return tr.driver.Capabilities() } + +func (tr *TaskRunner) SetAllocHookResources(res *cstructs.AllocHookResources) { + tr.allocHookResources = res +} diff --git a/client/structs/allochook.go b/client/structs/allochook.go new file mode 100644 index 000000000..59c56c0f7 --- /dev/null +++ b/client/structs/allochook.go @@ -0,0 +1,29 @@ +package structs + +import ( + "sync" + + "github.com/hashicorp/nomad/client/pluginmanager/csimanager" +) + +// AllocHookResources contains data that is provided by AllocRunner Hooks for +// consumption by TaskRunners +type AllocHookResources struct { + CSIMounts map[string]*csimanager.MountInfo + + mu sync.RWMutex +} + +func (a *AllocHookResources) GetCSIMounts() map[string]*csimanager.MountInfo { + a.mu.RLock() + defer a.mu.RUnlock() + + return a.CSIMounts +} + +func (a *AllocHookResources) SetCSIMounts(m map[string]*csimanager.MountInfo) { + a.mu.Lock() + defer a.mu.Unlock() + + a.CSIMounts = m +} From 7a33864edf4c461b60d8f970aee76cfe6eb9fa1d Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 13 Feb 2020 11:23:50 +0100 Subject: [PATCH 079/126] volume_hook: Loosen validation in host volume prep --- client/allocrunner/taskrunner/volume_hook.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 15881df20..3bd3e6e96 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -57,8 +57,10 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM for _, m := range taskMounts { req, ok := taskVolumesByAlias[m.Volume] if !ok { - // Should never happen unless we misvalidated on job submission - return nil, fmt.Errorf("No group volume declaration found named: %s", m.Volume) + // This function receives only the task volumes that are of type Host, + // if we can't find a group volume then we assume the mount is for another + // type. 
+ continue } // This is a defensive check, but this function should only ever receive From 8692ca86bb42163e07c4f0f2c91da1e888bf5617 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Thu, 13 Feb 2020 13:57:41 +0100 Subject: [PATCH 080/126] taskrunner/volume_hook: Mounts for CSI Volumes This commit implements support for creating driver mounts for CSI Volumes. It works by fetching the created mounts from the allocation resources and then iterates through the volume requests, creating driver mount configs as required. It's a little bit messy primarily because there's _so_ much terminology overlap and it's a bit difficult to follow. --- client/allocrunner/taskrunner/volume_hook.go | 48 +++++++- .../taskrunner/volume_hook_test.go | 111 ++++++++++++++++++ 2 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 client/allocrunner/taskrunner/volume_hook_test.go diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 3bd3e6e96..d6a8ffbc5 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -122,8 +122,50 @@ func (h *volumeHook) prepareHostVolumes(volumes map[string]*structs.VolumeReques return hostVolumeMounts, nil } -func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest) ([]*drivers.MountConfig, error) { - return nil, nil +// partitionMountsByVolume takes a list of volume mounts and returns them in the +// form of volume-alias:[]volume-mount because one volume may be mounted multiple +// times. +func partitionMountsByVolume(xs []*structs.VolumeMount) map[string][]*structs.VolumeMount { + result := make(map[string][]*structs.VolumeMount) + for _, mount := range xs { + result[mount.Volume] = append(result[mount.Volume], mount) + } + + return result +} + +func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) { + if len(volumes) == 0 { + return nil, nil + } + + var mounts []*drivers.MountConfig + + mountRequests := partitionMountsByVolume(req.Task.VolumeMounts) + csiMountPoints := h.runner.allocHookResources.GetCSIMounts() + for alias, request := range volumes { + mountsForAlias, ok := mountRequests[alias] + if !ok { + // This task doesn't use the volume + continue + } + + csiMountPoint, ok := csiMountPoints[alias] + if !ok { + return nil, fmt.Errorf("No CSI Mount Point found for volume: %s", alias) + } + + for _, m := range mountsForAlias { + mcfg := &drivers.MountConfig{ + HostPath: csiMountPoint.Source, + TaskPath: m.Destination, + Readonly: request.ReadOnly || m.ReadOnly, + } + mounts = append(mounts, mcfg) + } + } + + return mounts, nil } func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { @@ -134,7 +176,7 @@ func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartR return err } - csiVolumeMounts, err := h.prepareCSIVolumes(req) + csiVolumeMounts, err := h.prepareCSIVolumes(req, volumes[structs.VolumeTypeCSI]) if err != nil { return err } diff --git a/client/allocrunner/taskrunner/volume_hook_test.go b/client/allocrunner/taskrunner/volume_hook_test.go new file mode 100644 index 000000000..8c0e924fb --- /dev/null +++ b/client/allocrunner/taskrunner/volume_hook_test.go @@ -0,0 +1,111 @@ +package taskrunner + +import ( + "testing" + + "github.com/hashicorp/nomad/client/allocrunner/interfaces" + 
"github.com/hashicorp/nomad/client/pluginmanager/csimanager" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/drivers" + "github.com/stretchr/testify/require" +) + +func TestVolumeHook_PartitionMountsByVolume_Works(t *testing.T) { + mounts := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + }, + { + Volume: "foo", + Destination: "/bar", + ReadOnly: false, + }, + { + Volume: "baz", + Destination: "/baz", + ReadOnly: false, + }, + } + + expected := map[string][]*structs.VolumeMount{ + "foo": { + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + }, + { + Volume: "foo", + Destination: "/bar", + ReadOnly: false, + }, + }, + "baz": { + { + Volume: "baz", + Destination: "/baz", + ReadOnly: false, + }, + }, + } + + // Test with a real collection + + partitioned := partitionMountsByVolume(mounts) + require.Equal(t, expected, partitioned) + + // Test with nil/emptylist + + partitioned = partitionMountsByVolume(nil) + require.Equal(t, map[string][]*structs.VolumeMount{}, partitioned) +} + +func TestVolumeHook_prepareCSIVolumes(t *testing.T) { + req := &interfaces.TaskPrestartRequest{ + Task: &structs.Task{ + VolumeMounts: []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/bar", + }, + }, + }, + } + + volumes := map[string]*structs.VolumeRequest{ + "foo": { + Type: "csi", + Source: "my-test-volume", + }, + } + + tr := &TaskRunner{ + allocHookResources: &cstructs.AllocHookResources{ + CSIMounts: map[string]*csimanager.MountInfo{ + "foo": &csimanager.MountInfo{ + Source: "/mnt/my-test-volume", + }, + }, + }, + } + + expected := []*drivers.MountConfig{ + { + HostPath: "/mnt/my-test-volume", + TaskPath: "/bar", + }, + } + + hook := &volumeHook{ + logger: testlog.HCLogger(t), + alloc: structs.MockAlloc(), + runner: tr, + } + mounts, err := hook.prepareCSIVolumes(req, volumes) + require.NoError(t, err) + require.Equal(t, expected, mounts) +} From 6665bdec2e05c7d9b20c311b720b8a74cefea794 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 14 Feb 2020 12:21:18 +0100 Subject: [PATCH 081/126] taskrunner/volume_hook: Cleanup arg order of prepareHostVolumes --- client/allocrunner/taskrunner/volume_hook.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index d6a8ffbc5..5e35f99d3 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -103,7 +103,7 @@ func partitionVolumesByType(xs map[string]*structs.VolumeRequest) map[string]map return result } -func (h *volumeHook) prepareHostVolumes(volumes map[string]*structs.VolumeRequest, req *interfaces.TaskPrestartRequest) ([]*drivers.MountConfig, error) { +func (h *volumeHook) prepareHostVolumes(req *interfaces.TaskPrestartRequest, volumes map[string]*structs.VolumeRequest) ([]*drivers.MountConfig, error) { hostVolumes := h.runner.clientConfig.Node.HostVolumes // Always validate volumes to ensure that we do not allow volumes to be used @@ -171,7 +171,7 @@ func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volu func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes) - hostVolumeMounts, err := 
h.prepareHostVolumes(volumes[structs.VolumeTypeHost], req) + hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost]) if err != nil { return err } From 6762442199374d469b7840a979f4054851cd8338 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 14 Feb 2020 13:33:52 +0100 Subject: [PATCH 082/126] csiclient: Add grpc.CallOption support to NodeUnpublishVolume --- plugins/csi/client.go | 4 ++-- plugins/csi/fake/client.go | 2 +- plugins/csi/plugin.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index fe982d3f2..f51429a26 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -394,7 +394,7 @@ func (c *client) NodePublishVolume(ctx context.Context, req *NodePublishVolumeRe return err } -func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error { +func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { if c == nil { return fmt.Errorf("Client not initialized") } @@ -417,6 +417,6 @@ func (c *client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath s // NodeUnpublishVolume's response contains no extra data. If err == nil, we were // successful. - _, err := c.nodeClient.NodeUnpublishVolume(ctx, req) + _, err := c.nodeClient.NodeUnpublishVolume(ctx, req, opts...) return err } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index ea7513b8f..b72782126 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -208,7 +208,7 @@ func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu return c.NextNodePublishVolumeErr } -func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error { +func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 56dc90b63..78510a3a3 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -66,7 +66,7 @@ type CSIPlugin interface { // NodeUnpublishVolume is used to cleanup usage of a volume for an alloc. This // MUST be called before calling NodeUnstageVolume or ControllerUnpublishVolume // for the given volume. - NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string) error + NodeUnpublishVolume(ctx context.Context, volumeID, targetPath string, opts ...grpc.CallOption) error // Shutdown the client and ensure any connections are cleaned up. Close() error From a62a90e03cfa1335f0a27b607736be751b6350f3 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 14 Feb 2020 13:34:41 +0100 Subject: [PATCH 083/126] csi: Unpublish volumes during ar.Postrun This commit introduces initial support for unmounting csi volumes. It takes a relatively simplistic approach to performing NodeUnpublishVolume calls, optimising for cleaning up any leftover state rather than terminating early in the case of errors. This is because it happens during an allocation's shutdown flow and may not always have a corresponding call to `NodePublishVolume` that succeeded. 
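The shape of that best-effort cleanup is roughly the following sketch (illustrative only, not the hook code itself; unmountFn stands in for the csimanager call): each unmount is attempted and failures are accumulated with go-multierror rather than returned immediately.

package csiexample

import (
	"fmt"

	multierror "github.com/hashicorp/go-multierror"
)

// unmountAll attempts every unmount and collects failures instead of
// stopping at the first error, so the remaining volumes still get a
// chance to be cleaned up.
func unmountAll(volumeIDs []string, unmountFn func(id string) error) error {
	var result *multierror.Error
	for _, id := range volumeIDs {
		if err := unmountFn(id); err != nil {
			result = multierror.Append(result, fmt.Errorf("unmount %s: %v", id, err))
		}
	}
	// ErrorOrNil returns nil when no errors were appended.
	return result.ErrorOrNil()
}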
--- client/allocrunner/csi_hook.go | 36 ++++++++++++ client/pluginmanager/csimanager/volume.go | 49 +++++++++++++++- .../pluginmanager/csimanager/volume_test.go | 56 +++++++++++++++++++ 3 files changed, 140 insertions(+), 1 deletion(-) diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index aa281d2cc..d618eb85a 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -5,6 +5,7 @@ import ( "fmt" hclog "github.com/hashicorp/go-hclog" + multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" "github.com/hashicorp/nomad/nomad/structs" ) @@ -58,6 +59,41 @@ func (c *csiHook) Prerun() error { return nil } +func (c *csiHook) Postrun() error { + if !c.shouldRun() { + return nil + } + + ctx := context.TODO() + volumes, err := c.csiVolumesFromAlloc() + if err != nil { + return err + } + + // For Postrun, we accumulate all unmount errors, rather than stopping on the + // first failure. This is because we want to make a best effort to free all + // storage, and in some cases there may be incorrect errors from volumes that + // never mounted correctly during prerun when an alloc is failed. It may also + // fail because a volume was externally deleted while in use by this alloc. + var result *multierror.Error + + for _, volume := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, volume) + if err != nil { + result = multierror.Append(result, err) + continue + } + + err = mounter.UnmountVolume(ctx, volume, c.alloc) + if err != nil { + result = multierror.Append(result, err) + continue + } + } + + return result.ErrorOrNil() +} + // csiVolumesFromAlloc finds all the CSI Volume requests from the allocation's // task group and then fetches them from the Nomad Server, before returning // them in the form of map[RequestedAlias]*structs.CSIVolume. diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 2650e2fa0..1d61112c4 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -9,6 +9,7 @@ import ( grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/helper/mount" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" @@ -273,11 +274,57 @@ func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolum ) } +func combineErrors(maybeErrs ...error) error { + var result *multierror.Error + for _, err := range maybeErrs { + if err == nil { + continue + } + + result = multierror.Append(result, err) + } + + return result.ErrorOrNil() +} + +func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { + pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc) + + rpcErr := v.plugin.NodeUnpublishVolume(ctx, vol.ID, pluginTargetPath, + grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), + grpc_retry.WithMax(3), + grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), + ) + + hostTargetPath := v.allocDirForVolume(v.mountRoot, vol, alloc) + if _, err := os.Stat(hostTargetPath); os.IsNotExist(err) { + // Host Target Path already got destroyed, just return any rpcErr + return rpcErr + } + + // Host Target Path was not cleaned up, attempt to do so here. 
If it's still
+	// a mount then removing the dir will fail and we'll return any rpcErr and the
+	// file error.
+	rmErr := os.Remove(hostTargetPath)
+	if rmErr != nil {
+		return combineErrors(rpcErr, rmErr)
+	}
+
+	// We successfully removed the directory, return any rpcErrors that were
+	// encountered, but because we got here, they were probably flaky or the path
+	// was cleaned up externally. We might want to just return `nil` here in the
+	// future.
+	return rpcErr
+}
+
 func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error {
 	logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID)
 	ctx = hclog.WithContext(ctx, logger)
 
-	// TODO(GH-7030): NodeUnpublishVolume
+	err := v.unpublishVolume(ctx, vol, alloc)
+	if err != nil {
+		return err
+	}
 
 	if !v.requiresStaging {
 		return nil
diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go
index b5b74c142..5b4a9bafe 100644
--- a/client/pluginmanager/csimanager/volume_test.go
+++ b/client/pluginmanager/csimanager/volume_test.go
@@ -290,3 +290,59 @@ func TestVolumeManager_publishVolume(t *testing.T) {
 		})
 	}
 }
+
+func TestVolumeManager_unpublishVolume(t *testing.T) {
+	t.Parallel()
+	cases := []struct {
+		Name                 string
+		Allocation           *structs.Allocation
+		Volume               *structs.CSIVolume
+		PluginErr            error
+		ExpectedErr          error
+		ExpectedCSICallCount int64
+	}{
+		{
+			Name:       "Returns an error when the plugin returns an error",
+			Allocation: structs.MockAlloc(),
+			Volume: &structs.CSIVolume{
+				ID: "foo",
+			},
+			PluginErr:            errors.New("Some Unknown Error"),
+			ExpectedErr:          errors.New("Some Unknown Error"),
+			ExpectedCSICallCount: 1,
+		},
+		{
+			Name:       "Happy Path",
+			Allocation: structs.MockAlloc(),
+			Volume: &structs.CSIVolume{
+				ID: "foo",
+			},
+			PluginErr:            nil,
+			ExpectedErr:          nil,
+			ExpectedCSICallCount: 1,
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.Name, func(t *testing.T) {
+			tmpPath := tmpDir(t)
+			defer os.RemoveAll(tmpPath)
+
+			csiFake := &csifake.Client{}
+			csiFake.NextNodeUnpublishVolumeErr = tc.PluginErr
+
+			manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true)
+			ctx := context.Background()
+
+			err := manager.unpublishVolume(ctx, tc.Volume, tc.Allocation)
+
+			if tc.ExpectedErr != nil {
+				require.EqualError(t, err, tc.ExpectedErr.Error())
+			} else {
+				require.NoError(t, err)
+			}
+
+			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnpublishVolumeCallCount)
+		})
+	}
+}

From da4f6b60a2e126bf5da13c4343369088163e129f Mon Sep 17 00:00:00 2001
From: Danielle Lancashire
Date: Mon, 17 Feb 2020 12:10:12 +0100
Subject: [PATCH 084/126] csi: Pass through usage options to the csimanager

The CSI Spec requires us to attach and stage volumes based on different
types of usage information when it may affect how they are bound. Here
we pass through some basic usage options in the CSI Hook (specifically
the volume alias's ReadOnly field), and the attachment/access mode from
the volume. We pass the attachment/access mode separately from the
volume as it simplifies some handling and doesn't necessarily force
every attachment to use the same mode should more be supported (i.e. if
we let each `volume "foo" {}` specify an override in the future).
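The idea is easiest to see as a small sketch (names here are illustrative; the real helper is the ToFS method added below): the read-only flag and the two modes are folded into a stable path segment, so each distinct usage of a volume gets its own staging and publish directories.

package csiexample

import (
	"path/filepath"
	"strings"
)

// usageOptions mirrors the fields the CSI hook passes through.
type usageOptions struct {
	ReadOnly       bool
	AttachmentMode string
	AccessMode     string
}

// pathSegment folds the usage into a filesystem-safe directory name,
// producing something like "ro-file-system-single-node-writer".
func (u usageOptions) pathSegment() string {
	mode := "rw"
	if u.ReadOnly {
		mode = "ro"
	}
	return strings.Join([]string{mode, u.AttachmentMode, u.AccessMode}, "-")
}

// stagingDirFor keeps staging paths distinct per volume and per usage.
func stagingDirFor(root, volumeID string, u usageOptions) string {
	return filepath.Join(root, "staging", volumeID, u.pathSegment())
}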
--- client/allocrunner/csi_hook.go | 46 ++++++++++----- client/pluginmanager/csimanager/interface.go | 27 ++++++++- client/pluginmanager/csimanager/volume.go | 53 +++++++++-------- .../pluginmanager/csimanager/volume_test.go | 59 ++++++++++++------- 4 files changed, 122 insertions(+), 63 deletions(-) diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index d618eb85a..409c4822a 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -38,13 +38,19 @@ func (c *csiHook) Prerun() error { } mounts := make(map[string]*csimanager.MountInfo, len(volumes)) - for alias, volume := range volumes { - mounter, err := c.csimanager.MounterForVolume(ctx, volume) + for alias, pair := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, pair.volume) if err != nil { return err } - mountInfo, err := mounter.MountVolume(ctx, volume, c.alloc) + usageOpts := &csimanager.UsageOptions{ + ReadOnly: pair.request.ReadOnly, + AttachmentMode: string(pair.volume.AttachmentMode), + AccessMode: string(pair.volume.AccessMode), + } + + mountInfo, err := mounter.MountVolume(ctx, pair.volume, c.alloc, usageOpts) if err != nil { return err } @@ -77,14 +83,20 @@ func (c *csiHook) Postrun() error { // fail because a volume was externally deleted while in use by this alloc. var result *multierror.Error - for _, volume := range volumes { - mounter, err := c.csimanager.MounterForVolume(ctx, volume) + for _, pair := range volumes { + mounter, err := c.csimanager.MounterForVolume(ctx, pair.volume) if err != nil { result = multierror.Append(result, err) continue } - err = mounter.UnmountVolume(ctx, volume, c.alloc) + usageOpts := &csimanager.UsageOptions{ + ReadOnly: pair.request.ReadOnly, + AttachmentMode: string(pair.volume.AttachmentMode), + AccessMode: string(pair.volume.AccessMode), + } + + err = mounter.UnmountVolume(ctx, pair.volume, c.alloc, usageOpts) if err != nil { result = multierror.Append(result, err) continue @@ -94,24 +106,28 @@ func (c *csiHook) Postrun() error { return result.ErrorOrNil() } +type volumeAndRequest struct { + volume *structs.CSIVolume + request *structs.VolumeRequest +} + // csiVolumesFromAlloc finds all the CSI Volume requests from the allocation's // task group and then fetches them from the Nomad Server, before returning // them in the form of map[RequestedAlias]*structs.CSIVolume. // // If any volume fails to validate then we return an error. 
-func (c *csiHook) csiVolumesFromAlloc() (map[string]*structs.CSIVolume, error) { - vols := make(map[string]*structs.VolumeRequest) +func (c *csiHook) csiVolumesFromAlloc() (map[string]*volumeAndRequest, error) { + vols := make(map[string]*volumeAndRequest) tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup) for alias, vol := range tg.Volumes { if vol.Type == structs.VolumeTypeCSI { - vols[alias] = vol + vols[alias] = &volumeAndRequest{request: vol} } } - csiVols := make(map[string]*structs.CSIVolume, len(vols)) - for alias, request := range vols { + for alias, pair := range vols { req := &structs.CSIVolumeGetRequest{ - ID: request.Source, + ID: pair.request.Source, } req.Region = c.alloc.Job.Region @@ -121,13 +137,13 @@ func (c *csiHook) csiVolumesFromAlloc() (map[string]*structs.CSIVolume, error) { } if resp.Volume == nil { - return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", request.Source) + return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", pair.request.Source) } - csiVols[alias] = resp.Volume + vols[alias].volume = resp.Volume } - return csiVols, nil + return vols, nil } func newCSIHook(logger hclog.Logger, alloc *structs.Allocation, rpcClient RPCer, csi csimanager.Manager, updater hookResourceSetter) *csiHook { diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index b969ff13e..413f8ff65 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -3,6 +3,7 @@ package csimanager import ( "context" "errors" + "strings" "github.com/hashicorp/nomad/client/pluginmanager" "github.com/hashicorp/nomad/nomad/structs" @@ -17,9 +18,31 @@ type MountInfo struct { IsDevice bool } +type UsageOptions struct { + ReadOnly bool + AttachmentMode string + AccessMode string +} + +func (u *UsageOptions) ToFS() string { + var sb strings.Builder + + if u.ReadOnly { + sb.WriteString("ro-") + } else { + sb.WriteString("rw-") + } + + sb.WriteString(u.AttachmentMode) + sb.WriteString("-") + sb.WriteString(u.AccessMode) + + return sb.String() +} + type VolumeMounter interface { - MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) - UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error + MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions) (*MountInfo, error) + UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions) error } type Manager interface { diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 1d61112c4..3d02903d9 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -61,12 +61,12 @@ func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir, contai } } -func (v *volumeManager) stagingDirForVolume(root string, vol *structs.CSIVolume) string { - return filepath.Join(root, StagingDirName, vol.ID, "todo-provide-usage-options") +func (v *volumeManager) stagingDirForVolume(root string, vol *structs.CSIVolume, usage *UsageOptions) string { + return filepath.Join(root, StagingDirName, vol.ID, usage.ToFS()) } -func (v *volumeManager) allocDirForVolume(root string, vol *structs.CSIVolume, alloc *structs.Allocation) string { - return filepath.Join(root, AllocSpecificDirName, alloc.ID, vol.ID, "todo-provide-usage-options") +func (v *volumeManager) 
allocDirForVolume(root string, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) string { + return filepath.Join(root, AllocSpecificDirName, alloc.ID, vol.ID, usage.ToFS()) } // ensureStagingDir attempts to create a directory for use when staging a volume @@ -75,8 +75,8 @@ func (v *volumeManager) allocDirForVolume(root string, vol *structs.CSIVolume, a // // Returns whether the directory is a pre-existing mountpoint, the staging path, // and any errors that occurred. -func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, error) { - stagingPath := v.stagingDirForVolume(v.mountRoot, vol) +func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume, usage *UsageOptions) (string, bool, error) { + stagingPath := v.stagingDirForVolume(v.mountRoot, vol, usage) // Make the staging path, owned by the Nomad User if err := os.MkdirAll(stagingPath, 0700); err != nil && !os.IsExist(err) { @@ -100,8 +100,8 @@ func (v *volumeManager) ensureStagingDir(vol *structs.CSIVolume) (string, bool, // // Returns whether the directory is a pre-existing mountpoint, the publish path, // and any errors that occurred. -func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation) (string, bool, error) { - allocPath := v.allocDirForVolume(v.mountRoot, vol, alloc) +func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (string, bool, error) { + allocPath := v.allocDirForVolume(v.mountRoot, vol, alloc, usage) // Make the alloc path, owned by the Nomad User if err := os.MkdirAll(allocPath, 0700); err != nil && !os.IsExist(err) { @@ -165,14 +165,14 @@ func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, erro // stageVolume prepares a volume for use by allocations. When a plugin exposes // the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a // given usage mode before the volume can be NodePublish-ed. 
-func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) error { +func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions) error { logger := hclog.FromContext(ctx) logger.Trace("Preparing volume staging environment") - hostStagingPath, isMount, err := v.ensureStagingDir(vol) + hostStagingPath, isMount, err := v.ensureStagingDir(vol, usage) if err != nil { return err } - pluginStagingPath := v.stagingDirForVolume(v.containerMountPoint, vol) + pluginStagingPath := v.stagingDirForVolume(v.containerMountPoint, vol, usage) logger.Trace("Volume staging environment", "pre-existing_mount", isMount, "host_staging_path", hostStagingPath, "plugin_staging_path", pluginStagingPath) @@ -202,18 +202,18 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume) ) } -func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { +func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (*MountInfo, error) { logger := hclog.FromContext(ctx) var pluginStagingPath string if v.requiresStaging { - pluginStagingPath = v.stagingDirForVolume(v.containerMountPoint, vol) + pluginStagingPath = v.stagingDirForVolume(v.containerMountPoint, vol, usage) } - hostTargetPath, isMount, err := v.ensureAllocDir(vol, alloc) + hostTargetPath, isMount, err := v.ensureAllocDir(vol, alloc, usage) if err != nil { return nil, err } - pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc) + pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc, usage) if isMount { logger.Debug("Re-using existing published volume for allocation") @@ -231,6 +231,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum StagingTargetPath: pluginStagingPath, TargetPath: pluginTargetPath, VolumeCapability: capabilities, + Readonly: usage.ReadOnly, }, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), grpc_retry.WithMax(3), @@ -244,27 +245,27 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum // configuration for the provided allocation. // // TODO: Validate remote volume attachment and implement. -func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) (*MountInfo, error) { +func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (*MountInfo, error) { logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) if v.requiresStaging { - if err := v.stageVolume(ctx, vol); err != nil { + if err := v.stageVolume(ctx, vol, usage); err != nil { return nil, err } } - return v.publishVolume(ctx, vol, alloc) + return v.publishVolume(ctx, vol, alloc, usage) } // unstageVolume is the inverse operation of `stageVolume` and must be called // once for each staging path that a volume has been staged under. // It is safe to call multiple times and a plugin is required to return OK if // the volume has been unstaged or was never staged on the node. 
-func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolume) error { +func (v *volumeManager) unstageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions) error { logger := hclog.FromContext(ctx) logger.Trace("Unstaging volume") - stagingPath := v.stagingDirForVolume(v.containerMountPoint, vol) + stagingPath := v.stagingDirForVolume(v.containerMountPoint, vol, usage) return v.plugin.NodeUnstageVolume(ctx, vol.ID, stagingPath, @@ -287,8 +288,8 @@ func combineErrors(maybeErrs ...error) error { return result.ErrorOrNil() } -func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { - pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc) +func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) error { + pluginTargetPath := v.allocDirForVolume(v.containerMountPoint, vol, alloc, usage) rpcErr := v.plugin.NodeUnpublishVolume(ctx, vol.ID, pluginTargetPath, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), @@ -296,7 +297,7 @@ func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVol grpc_retry.WithBackoff(grpc_retry.BackoffExponential(100*time.Millisecond)), ) - hostTargetPath := v.allocDirForVolume(v.mountRoot, vol, alloc) + hostTargetPath := v.allocDirForVolume(v.mountRoot, vol, alloc, usage) if _, err := os.Stat(hostTargetPath); os.IsNotExist(err) { // Host Target Path already got destroyed, just return any rpcErr return rpcErr @@ -317,11 +318,11 @@ func (v *volumeManager) unpublishVolume(ctx context.Context, vol *structs.CSIVol return rpcErr } -func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation) error { +func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) error { logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) - err := v.unpublishVolume(ctx, vol, alloc) + err := v.unpublishVolume(ctx, vol, alloc, usage) if err != nil { return err } @@ -332,5 +333,5 @@ func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolum // TODO(GH-7029): Implement volume usage tracking and only unstage volumes // when the last alloc stops using it. 
- return v.unstageVolume(ctx, vol) + return v.unstageVolume(ctx, vol, usage) } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 5b4a9bafe..689bd86a7 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -27,6 +27,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { cases := []struct { Name string Volume *structs.CSIVolume + UsageOptions *UsageOptions CreateDirAheadOfTime bool MountDirAheadOfTime bool @@ -34,21 +35,25 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { ExpectedMountState bool }{ { - Name: "Creates a directory when one does not exist", - Volume: &structs.CSIVolume{ID: "foo"}, + Name: "Creates a directory when one does not exist", + Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, }, { Name: "Does not fail because of a pre-existing directory", Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, CreateDirAheadOfTime: true, }, { - Name: "Returns negative mount info", - Volume: &structs.CSIVolume{ID: "foo"}, + Name: "Returns negative mount info", + UsageOptions: &UsageOptions{}, + Volume: &structs.CSIVolume{ID: "foo"}, }, { Name: "Returns positive mount info", Volume: &structs.CSIVolume{ID: "foo"}, + UsageOptions: &UsageOptions{}, CreateDirAheadOfTime: true, MountDirAheadOfTime: true, ExpectedMountState: true, @@ -75,7 +80,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { csiFake := &csifake.Client{} manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) - expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume) + expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume, tc.UsageOptions) if tc.CreateDirAheadOfTime { err := os.MkdirAll(expectedStagingPath, 0700) @@ -84,7 +89,7 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { // Step 3: Now we can do some testing - path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume) + path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume, tc.UsageOptions) if tc.ExpectedErr != nil { require.EqualError(t, testErr, tc.ExpectedErr.Error()) return // We don't perform extra validation if an error was detected. 
@@ -112,10 +117,11 @@ func TestVolumeManager_ensureStagingDir(t *testing.T) { func TestVolumeManager_stageVolume(t *testing.T) { t.Parallel() cases := []struct { - Name string - Volume *structs.CSIVolume - PluginErr error - ExpectedErr error + Name string + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error }{ { Name: "Returns an error when an invalid AttachmentMode is provided", @@ -123,7 +129,8 @@ func TestVolumeManager_stageVolume(t *testing.T) { ID: "foo", AttachmentMode: "nonsense", }, - ExpectedErr: errors.New("Unknown volume attachment mode: nonsense"), + UsageOptions: &UsageOptions{}, + ExpectedErr: errors.New("Unknown volume attachment mode: nonsense"), }, { Name: "Returns an error when an invalid AccessMode is provided", @@ -132,7 +139,8 @@ func TestVolumeManager_stageVolume(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, AccessMode: "nonsense", }, - ExpectedErr: errors.New("Unknown volume access mode: nonsense"), + UsageOptions: &UsageOptions{}, + ExpectedErr: errors.New("Unknown volume access mode: nonsense"), }, { Name: "Returns an error when the plugin returns an error", @@ -141,8 +149,9 @@ func TestVolumeManager_stageVolume(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, }, - PluginErr: errors.New("Some Unknown Error"), - ExpectedErr: errors.New("Some Unknown Error"), + UsageOptions: &UsageOptions{}, + PluginErr: errors.New("Some Unknown Error"), + ExpectedErr: errors.New("Some Unknown Error"), }, { Name: "Happy Path", @@ -151,8 +160,9 @@ func TestVolumeManager_stageVolume(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, }, - PluginErr: nil, - ExpectedErr: nil, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, }, } @@ -167,7 +177,7 @@ func TestVolumeManager_stageVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - err := manager.stageVolume(ctx, tc.Volume) + err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -183,6 +193,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { cases := []struct { Name string Volume *structs.CSIVolume + UsageOptions *UsageOptions PluginErr error ExpectedErr error ExpectedCSICallCount int64 @@ -192,6 +203,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { Volume: &structs.CSIVolume{ ID: "foo", }, + UsageOptions: &UsageOptions{}, PluginErr: errors.New("Some Unknown Error"), ExpectedErr: errors.New("Some Unknown Error"), ExpectedCSICallCount: 1, @@ -201,6 +213,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { Volume: &structs.CSIVolume{ ID: "foo", }, + UsageOptions: &UsageOptions{}, PluginErr: nil, ExpectedErr: nil, ExpectedCSICallCount: 1, @@ -218,7 +231,7 @@ func TestVolumeManager_unstageVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - err := manager.unstageVolume(ctx, tc.Volume) + err := manager.unstageVolume(ctx, tc.Volume, tc.UsageOptions) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -237,6 +250,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { Name string Allocation *structs.Allocation Volume *structs.CSIVolume + UsageOptions *UsageOptions PluginErr error 
ExpectedErr error ExpectedCSICallCount int64 @@ -249,6 +263,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, }, + UsageOptions: &UsageOptions{}, PluginErr: errors.New("Some Unknown Error"), ExpectedErr: errors.New("Some Unknown Error"), ExpectedCSICallCount: 1, @@ -261,6 +276,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice, AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, }, + UsageOptions: &UsageOptions{}, PluginErr: nil, ExpectedErr: nil, ExpectedCSICallCount: 1, @@ -278,7 +294,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation) + _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -297,6 +313,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { Name string Allocation *structs.Allocation Volume *structs.CSIVolume + UsageOptions *UsageOptions PluginErr error ExpectedErr error ExpectedCSICallCount int64 @@ -307,6 +324,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { Volume: &structs.CSIVolume{ ID: "foo", }, + UsageOptions: &UsageOptions{}, PluginErr: errors.New("Some Unknown Error"), ExpectedErr: errors.New("Some Unknown Error"), ExpectedCSICallCount: 1, @@ -317,6 +335,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { Volume: &structs.CSIVolume{ ID: "foo", }, + UsageOptions: &UsageOptions{}, PluginErr: nil, ExpectedErr: nil, ExpectedCSICallCount: 1, @@ -334,7 +353,7 @@ func TestVolumeManager_unpublishVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - err := manager.unpublishVolume(ctx, tc.Volume, tc.Allocation) + err := manager.unpublishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) From e227f31584e273040ec01fa1dca9ad398a7c35d1 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 17 Feb 2020 14:38:08 +0100 Subject: [PATCH 085/126] sched/feasible: Return more detailed CSI Failure messages --- scheduler/feasible.go | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/scheduler/feasible.go b/scheduler/feasible.go index 78a5a245f..44a8445f9 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -15,10 +15,13 @@ import ( ) const ( - FilterConstraintHostVolumes = "missing compatible host volumes" - FilterConstraintCSIVolumes = "missing CSI plugins" - FilterConstraintDrivers = "missing drivers" - FilterConstraintDevices = "missing devices" + FilterConstraintHostVolumes = "missing compatible host volumes" + FilterConstraintCSIPlugins = "missing CSI plugins" + FilterConstraintCSIVolumesLookupFailed = "CSI volume lookup failed" + FilterConstraintCSIVolumeNotFoundTemplate = "missing CSI Volume %s" + FilterConstraintCSIVolumeExhaustedTemplate = "CSI Volume %s has exhausted its available claims" + FilterConstraintDrivers = "missing drivers" + FilterConstraintDevices = "missing devices" ) // FeasibleIterator is used to iteratively yield nodes that @@ -209,15 +212,17 @@ func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) 
} func (c *CSIVolumeChecker) Feasible(n *structs.Node) bool { - if c.hasPlugins(n) { + hasPlugins, failReason := c.hasPlugins(n) + + if hasPlugins { return true } - c.ctx.Metrics().FilterNode(n, FilterConstraintCSIVolumes) + c.ctx.Metrics().FilterNode(n, failReason) return false } -func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) bool { +func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) (bool, string) { // We can mount the volume if // - if required, a healthy controller plugin is running the driver // - the volume has free claims @@ -225,7 +230,7 @@ func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) bool { // Fast path: Requested no volumes. No need to check further. if len(c.volumes) == 0 { - return true + return true, "" } ws := memdb.NewWatchSet() @@ -233,24 +238,27 @@ func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) bool { // Get the volume to check that it's healthy (there's a healthy controller // and the volume hasn't encountered an error or been marked for GC vol, err := c.ctx.State().CSIVolumeByID(ws, req.Source) - if err != nil || vol == nil { - return false + if err != nil { + return false, FilterConstraintCSIVolumesLookupFailed + } + if vol == nil { + return false, fmt.Sprintf(FilterConstraintCSIVolumeNotFoundTemplate, req.Source) } // Check that this node has a healthy running plugin with the right PluginID plugin, ok := n.CSINodePlugins[vol.PluginID] if !(ok && plugin.Healthy) { - return false + return false, FilterConstraintCSIPlugins } if (req.ReadOnly && !vol.CanReadOnly()) || !vol.CanWrite() { - return false + return false, FilterConstraintCSIPlugins } } - return true + return true, "" } // DriverChecker is a FeasibilityChecker which returns whether a node has the From 6b7ee96a88047d87258996f0500a12391cb6847e Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 18 Feb 2020 17:07:27 +0100 Subject: [PATCH 086/126] csi: Move VolumeCapabilties helper to package --- client/pluginmanager/csimanager/volume.go | 48 +---------------------- plugins/csi/plugin.go | 45 +++++++++++++++++++++ 2 files changed, 47 insertions(+), 46 deletions(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 3d02903d9..728624a46 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -118,50 +118,6 @@ func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Al return allocPath, !isNotMount, nil } -func capabilitiesFromVolume(vol *structs.CSIVolume) (*csi.VolumeCapability, error) { - var accessType csi.VolumeAccessType - switch vol.AttachmentMode { - case structs.CSIVolumeAttachmentModeBlockDevice: - accessType = csi.VolumeAccessTypeBlock - case structs.CSIVolumeAttachmentModeFilesystem: - accessType = csi.VolumeAccessTypeMount - default: - // These fields are validated during job submission, but here we perform a - // final check during transformation into the requisite CSI Data type to - // defend against development bugs and corrupted state - and incompatible - // nomad versions in the future. 
- return nil, fmt.Errorf("Unknown volume attachment mode: %s", vol.AttachmentMode) - } - - var accessMode csi.VolumeAccessMode - switch vol.AccessMode { - case structs.CSIVolumeAccessModeSingleNodeReader: - accessMode = csi.VolumeAccessModeSingleNodeReaderOnly - case structs.CSIVolumeAccessModeSingleNodeWriter: - accessMode = csi.VolumeAccessModeSingleNodeWriter - case structs.CSIVolumeAccessModeMultiNodeMultiWriter: - accessMode = csi.VolumeAccessModeMultiNodeMultiWriter - case structs.CSIVolumeAccessModeMultiNodeSingleWriter: - accessMode = csi.VolumeAccessModeMultiNodeSingleWriter - case structs.CSIVolumeAccessModeMultiNodeReader: - accessMode = csi.VolumeAccessModeMultiNodeReaderOnly - default: - // These fields are validated during job submission, but here we perform a - // final check during transformation into the requisite CSI Data type to - // defend against development bugs and corrupted state - and incompatible - // nomad versions in the future. - return nil, fmt.Errorf("Unknown volume access mode: %v", vol.AccessMode) - } - - return &csi.VolumeCapability{ - AccessType: accessType, - AccessMode: accessMode, - VolumeMountOptions: &csi.VolumeMountOptions{ - // GH-7007: Currently we have no way to provide these - }, - }, nil -} - // stageVolume prepares a volume for use by allocations. When a plugin exposes // the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a // given usage mode before the volume can be NodePublish-ed. @@ -181,7 +137,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, return nil } - capability, err := capabilitiesFromVolume(vol) + capability, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) if err != nil { return err } @@ -220,7 +176,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum return &MountInfo{Source: hostTargetPath}, nil } - capabilities, err := capabilitiesFromVolume(vol) + capabilities, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) if err != nil { return nil, err } diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 78510a3a3..26c7227e7 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -6,6 +6,7 @@ import ( "fmt" csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" "google.golang.org/grpc" ) @@ -389,6 +390,50 @@ type VolumeCapability struct { VolumeMountOptions *VolumeMountOptions } +func VolumeCapabilityFromStructs(sAccessType structs.CSIVolumeAttachmentMode, sAccessMode structs.CSIVolumeAccessMode) (*VolumeCapability, error) { + var accessType VolumeAccessType + switch sAccessType { + case structs.CSIVolumeAttachmentModeBlockDevice: + accessType = VolumeAccessTypeBlock + case structs.CSIVolumeAttachmentModeFilesystem: + accessType = VolumeAccessTypeMount + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. 
+ return nil, fmt.Errorf("Unknown volume attachment mode: %s", sAccessType) + } + + var accessMode VolumeAccessMode + switch sAccessMode { + case structs.CSIVolumeAccessModeSingleNodeReader: + accessMode = VolumeAccessModeSingleNodeReaderOnly + case structs.CSIVolumeAccessModeSingleNodeWriter: + accessMode = VolumeAccessModeSingleNodeWriter + case structs.CSIVolumeAccessModeMultiNodeMultiWriter: + accessMode = VolumeAccessModeMultiNodeMultiWriter + case structs.CSIVolumeAccessModeMultiNodeSingleWriter: + accessMode = VolumeAccessModeMultiNodeSingleWriter + case structs.CSIVolumeAccessModeMultiNodeReader: + accessMode = VolumeAccessModeMultiNodeReaderOnly + default: + // These fields are validated during job submission, but here we perform a + // final check during transformation into the requisite CSI Data type to + // defend against development bugs and corrupted state - and incompatible + // nomad versions in the future. + return nil, fmt.Errorf("Unknown volume access mode: %v", sAccessMode) + } + + return &VolumeCapability{ + AccessType: accessType, + AccessMode: accessMode, + VolumeMountOptions: &VolumeMountOptions{ + // GH-7007: Currently we have no way to provide these + }, + }, nil +} + func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { if c == nil { return nil From 34acb596e392c773b440536e57ab07a0e92d8d18 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 18 Feb 2020 17:08:00 +0100 Subject: [PATCH 087/126] plugins/csi: Implement ConvtrollerValidateCapabilities RPC --- plugins/csi/client.go | 39 ++++++++++++++++++++++++++++++++++++++ plugins/csi/fake/client.go | 12 ++++++++++++ plugins/csi/plugin.go | 4 ++++ 3 files changed, 55 insertions(+) diff --git a/plugins/csi/client.go b/plugins/csi/client.go index f51429a26..00d3df9fe 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -274,6 +274,45 @@ func (c *client) ControllerUnpublishVolume(ctx context.Context, req *ControllerU return &ControllerUnpublishVolumeResponse{}, nil } +func (c *client) ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *VolumeCapability) error { + if c == nil { + return fmt.Errorf("Client not initialized") + } + if c.controllerClient == nil { + return fmt.Errorf("controllerClient not initialized") + } + + if volumeID == "" { + return fmt.Errorf("missing VolumeID") + } + + if capabilities == nil { + return fmt.Errorf("missing Capabilities") + } + + req := &csipbv1.ValidateVolumeCapabilitiesRequest{ + VolumeId: volumeID, + VolumeCapabilities: []*csipbv1.VolumeCapability{ + capabilities.ToCSIRepresentation(), + }, + } + + resp, err := c.controllerClient.ValidateVolumeCapabilities(ctx, req) + if err != nil { + return err + } + + if resp.Confirmed == nil { + if resp.Message != "" { + return fmt.Errorf("Volume validation failed, message: %s", resp.Message) + } + + return fmt.Errorf("Volume validation failed") + } + + return nil +} + // // Node Endpoints // diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index b72782126..3809f9333 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -48,6 +48,9 @@ type Client struct { NextControllerUnpublishVolumeErr error ControllerUnpublishVolumeCallCount int64 + NextControllerValidateVolumeErr error + ControllerValidateVolumeCallCount int64 + NextNodeGetCapabilitiesResponse *csi.NodeCapabilitySet NextNodeGetCapabilitiesErr error NodeGetCapabilitiesCallCount int64 @@ -153,6 +156,15 @@ func (c *Client) ControllerUnpublishVolume(ctx context.Context, req 
*csi.Control return c.NextControllerUnpublishVolumeResponse, c.NextControllerUnpublishVolumeErr } +func (c *Client) ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *csi.VolumeCapability) error { + c.Mu.Lock() + defer c.Mu.Unlock() + + c.ControllerValidateVolumeCallCount++ + + return c.NextControllerValidateVolumeErr +} + func (c *Client) NodeGetCapabilities(ctx context.Context) (*csi.NodeCapabilitySet, error) { c.Mu.Lock() defer c.Mu.Unlock() diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 26c7227e7..c38198241 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -40,6 +40,10 @@ type CSIPlugin interface { // ControllerUnpublishVolume is used to deattach a remote volume from a cluster node. ControllerUnpublishVolume(ctx context.Context, req *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + // ControllerValidateCapabilities is used to validate that a volume exists and + // supports the requested capability. + ControllerValidateCapabilties(ctx context.Context, volumeID string, capabilities *VolumeCapability) error + // NodeGetCapabilities is used to return the available capabilities from the // Node Service. NodeGetCapabilities(ctx context.Context) (*NodeCapabilitySet, error) From 9f1a076bd528e0c10475300a0a36af5ff550120f Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 18 Feb 2020 17:08:38 +0100 Subject: [PATCH 088/126] client: Implement ClientCSI.ControllerValidateVolume --- client/client_csi_endpoint.go | 30 ++++++++++ client/client_csi_endpoint_test.go | 89 ++++++++++++++++++++++++++++++ client/structs/csi.go | 11 ++++ 3 files changed, 130 insertions(+) diff --git a/client/client_csi_endpoint.go b/client/client_csi_endpoint.go index 6d3efab5b..0922f0dc6 100644 --- a/client/client_csi_endpoint.go +++ b/client/client_csi_endpoint.go @@ -30,6 +30,36 @@ var ( ErrPluginTypeError = errors.New("CSI Plugin loaded incorrectly") ) +// CSIControllerValidateVolume is used during volume registration to validate +// that a volume exists and that the capabilities it was registered with are +// supported by the CSI Plugin and external volume configuration. +func (c *ClientCSI) CSIControllerValidateVolume(req *structs.ClientCSIControllerValidateVolumeRequest, resp *structs.ClientCSIControllerValidateVolumeResponse) error { + defer metrics.MeasureSince([]string{"client", "csi_controller", "validate_volume"}, time.Now()) + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.PluginID == "" { + return errors.New("PluginID is required") + } + + plugin, err := c.findControllerPlugin(req.PluginID) + if err != nil { + return err + } + defer plugin.Close() + + caps, err := csi.VolumeCapabilityFromStructs(req.AttachmentMode, req.AccessMode) + if err != nil { + return err + } + + ctx, cancelFn := c.requestContext() + defer cancelFn() + return plugin.ControllerValidateCapabilties(ctx, req.VolumeID, caps) +} + // CSIControllerAttachVolume is used to attach a volume from a CSI Cluster to // the storage node provided in the request. 
// diff --git a/client/client_csi_endpoint_test.go b/client/client_csi_endpoint_test.go index 7af35aa49..4e5da6bd6 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/client_csi_endpoint_test.go @@ -147,3 +147,92 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { }) } } + +func TestClientCSI_CSIControllerValidateVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerValidateVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerValidateVolumeResponse + }{ + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + PluginID: fakePlugin.Name, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + PluginID: "some-garbage", + VolumeID: "foo", + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), + }, + { + Name: "validates attachmentmode", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + PluginID: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), + AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, + }, + ExpectedErr: errors.New("Unknown volume attachment mode: bar"), + }, + { + Name: "validates AccessMode", + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + PluginID: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + AccessMode: nstructs.CSIVolumeAccessMode("foo"), + }, + ExpectedErr: errors.New("Unknown volume access mode: foo"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerValidateVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerValidateVolumeRequest{ + PluginID: fakePlugin.Name, + VolumeID: "1234-4321-1234-4321", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + }, + ExpectedErr: errors.New("hello"), + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { + tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerValidateVolumeResponse + err = client.ClientRPC("ClientCSI.CSIControllerValidateVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} diff --git a/client/structs/csi.go b/client/structs/csi.go index 9616d1b78..6b9f891be 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -20,6 +20,17 @@ type CSIVolumeMountOptions struct { MountFlags []string } +type ClientCSIControllerValidateVolumeRequest struct { + PluginID string + VolumeID string + + AttachmentMode structs.CSIVolumeAttachmentMode + AccessMode structs.CSIVolumeAccessMode +} + +type ClientCSIControllerValidateVolumeResponse struct { +} + type ClientCSIControllerAttachVolumeRequest 
struct { PluginName string From c3b1154703342a320c726bce1c6edfd9f0c6f9c9 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Tue, 18 Feb 2020 17:08:44 +0100 Subject: [PATCH 089/126] csi: Validate Volumes during registration This PR implements some intitial support for doing deeper validation of a volume during its registration with the server. This allows us to validate the capabilities before users attempt to use the volumes during most cases, and also prevents registering volumes without first setting up a plugin, which should help to catch typos and the like during registration. This does have the downside of requiring users to wait for (1) instance of a plugin to be running in their cluster before they can register volumes. --- nomad/csi_endpoint.go | 41 +++++++++++++++++++++++++++++++++++++- nomad/csi_endpoint_test.go | 28 ++++++++++++-------------- 2 files changed, 53 insertions(+), 16 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index ff4c050ba..6a0aa995b 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -201,6 +201,39 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol return v.srv.blockingRPC(&opts) } +func (srv *Server) controllerValidateVolume(req *structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume) error { + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + plugin, err := state.CSIPluginByID(ws, vol.PluginID) + if err != nil { + return err + } + if plugin == nil { + return fmt.Errorf("no CSI plugin named: %s could be found", vol.PluginID) + } + + if !plugin.ControllerRequired { + // The plugin does not require a controller, so for now we won't do any + // further validation of the volume. + return nil + } + + // The plugin requires a controller. Now we do some validation of the Volume + // to ensure that the registered capabilities are valid and that the volume + // exists. + method := "ClientCSI.CSIControllerValidateVolume" + cReq := &cstructs.ClientCSIControllerValidateVolumeRequest{ + PluginID: plugin.ID, + VolumeID: vol.ID, + AttachmentMode: vol.AttachmentMode, + AccessMode: vol.AccessMode, + } + cResp := &cstructs.ClientCSIControllerValidateVolumeResponse{} + + return srv.csiControllerRPC(plugin, method, cReq, cResp) +} + // Register registers a new volume func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *structs.CSIVolumeRegisterResponse) error { if done, err := v.srv.forward("CSIVolume.Register", args, args, reply); done { @@ -220,12 +253,18 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return structs.ErrPermissionDenied } - // This is the only namespace we ACL checked, force all the volumes to use it + // This is the only namespace we ACL checked, force all the volumes to use it. + // We also validate that the plugin exists for each plugin, and validate the + // capabilities when the plugin has a controller. 
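For context, the user-facing path that exercises this server-side validation is volume registration through the Go API client. A minimal, hedged sketch of that call follows; the field values and the CSIVolumes accessor are illustrative assumptions based on the register pattern in api/csi_test.go later in this series, not part of this patch:

    // Hedged sketch: registering a CSI volume from a client program. If no
    // healthy plugin with this PluginID is running yet, or the requested
    // capabilities are unsupported, Register is expected to fail with the
    // validation errors added in this change.
    package main

    import (
        "log"

        "github.com/hashicorp/nomad/api"
    )

    func main() {
        client, err := api.NewClient(api.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        vol := &api.CSIVolume{
            ID:             "example-volume-id", // illustrative value
            Namespace:      "default",
            PluginID:       "example-plugin", // illustrative value
            AccessMode:     api.CSIVolumeAccessModeMultiNodeSingleWriter,
            AttachmentMode: api.CSIVolumeAttachmentModeFilesystem,
        }

        if _, err := client.CSIVolumes().Register(vol, &api.WriteOptions{Namespace: "default"}); err != nil {
            log.Fatal(err)
        }
    }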
for _, vol := range args.Volumes { vol.Namespace = args.RequestNamespace() if err = vol.Validate(); err != nil { return err } + + if err := v.srv.controllerValidateVolume(args, vol); err != nil { + return err + } } resp, index, err := v.srv.raftApply(structs.CSIVolumeRegisterRequestType, args) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 1931d39c4..6d784ad77 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -116,15 +116,22 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { ns := structs.DefaultNamespace state := srv.fsm.State() - state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) - srv.config.ACLEnabled = true - policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSICreateVolume}) - validToken := mock.CreatePolicyAndToken(t, state, 1001, acl.NamespaceCapabilityCSICreateVolume, policy) - codec := rpcClient(t, srv) id0 := uuid.Generate() + // Create the node and plugin + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + // Registers as node plugin that does not require a controller to skip + // the client RPC during registration. + NodeInfo: &structs.CSINodeInfo{}, + }, + } + require.NoError(t, state.UpsertNode(1000, node)) + // Create the volume vols := []*structs.CSIVolume{{ ID: id0, @@ -132,9 +139,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeReader, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Topologies: []*structs.CSITopology{{ - Segments: map[string]string{"foo": "bar"}, - }}, }} // Create the register request @@ -143,7 +147,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { WriteRequest: structs.WriteRequest{ Region: "global", Namespace: ns, - AuthToken: validToken.SecretID, }, } resp1 := &structs.CSIVolumeRegisterResponse{} @@ -152,14 +155,10 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { require.NotEqual(t, uint64(0), resp1.Index) // Get the volume back out - policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) - getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) - req2 := &structs.CSIVolumeGetRequest{ ID: id0, QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: getToken.SecretID, + Region: "global", }, } resp2 := &structs.CSIVolumeGetResponse{} @@ -179,7 +178,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { WriteRequest: structs.WriteRequest{ Region: "global", Namespace: ns, - AuthToken: validToken.SecretID, }, } resp3 := &structs.CSIVolumeDeregisterResponse{} From 0203341033c44ed5ba468dd5ffa946c0f11ab96c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 19 Feb 2020 11:14:13 +0100 Subject: [PATCH 090/126] csi: Add comment to UsageOptions.ToFS() --- client/pluginmanager/csimanager/interface.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index 413f8ff65..96cc05940 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -24,6 +24,10 @@ type UsageOptions struct { AccessMode string } +// ToFS is used by a VolumeManager to construct the path to where a volume +// should be staged/published. It should always return a string that is easy +// enough to manage as a filesystem path segment (e.g avoid starting the string +// with a special character). 
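The ToFS body itself follows below and is unchanged by this patch. As a hedged illustration of the behavior the new comment describes, deriving a filesystem-safe segment from the usage options, here is a sketch of assumed behavior only, not the actual method:

    // Hedged sketch, assumed behavior only: build a path segment from usage
    // options so each distinct usage mode gets its own staging/publish directory.
    package main

    import (
        "fmt"
        "strings"
    )

    func usageSegment(readOnly bool, attachmentMode, accessMode string) string {
        parts := []string{}
        if readOnly {
            parts = append(parts, "ro")
        } else {
            parts = append(parts, "rw")
        }
        if attachmentMode != "" {
            parts = append(parts, attachmentMode)
        }
        if accessMode != "" {
            parts = append(parts, accessMode)
        }
        return strings.Join(parts, "-")
    }

    func main() {
        fmt.Println(usageSegment(true, "file-system", "single-node-writer"))
        // prints: ro-file-system-single-node-writer
    }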
func (u *UsageOptions) ToFS() string { var sb strings.Builder From f79351915cfb4c16fedc350b3a975cfc390218c8 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 17 Feb 2020 15:37:49 +0100 Subject: [PATCH 091/126] csi: Basic volume usage tracking --- .../pluginmanager/csimanager/usage_tracker.go | 71 +++++++++++++++++++ .../csimanager/usage_tracker_test.go | 62 ++++++++++++++++ client/pluginmanager/csimanager/volume.go | 19 +++-- 3 files changed, 145 insertions(+), 7 deletions(-) create mode 100644 client/pluginmanager/csimanager/usage_tracker.go create mode 100644 client/pluginmanager/csimanager/usage_tracker_test.go diff --git a/client/pluginmanager/csimanager/usage_tracker.go b/client/pluginmanager/csimanager/usage_tracker.go new file mode 100644 index 000000000..df97d906e --- /dev/null +++ b/client/pluginmanager/csimanager/usage_tracker.go @@ -0,0 +1,71 @@ +package csimanager + +import ( + "sync" + + "github.com/hashicorp/nomad/nomad/structs" +) + +// volumeUsageTracker tracks the allocations that depend on a given volume +type volumeUsageTracker struct { + // state is a map of volumeUsageKey to a slice of allocation ids + state map[volumeUsageKey][]string + stateMu sync.Mutex +} + +func newVolumeUsageTracker() *volumeUsageTracker { + return &volumeUsageTracker{ + state: make(map[volumeUsageKey][]string), + } +} + +type volumeUsageKey struct { + volume *structs.CSIVolume + usageOpts UsageOptions +} + +func (v *volumeUsageTracker) allocsForKey(key volumeUsageKey) []string { + return v.state[key] +} + +func (v *volumeUsageTracker) appendAlloc(key volumeUsageKey, alloc *structs.Allocation) { + allocs := v.allocsForKey(key) + allocs = append(allocs, alloc.ID) + v.state[key] = allocs +} + +func (v *volumeUsageTracker) removeAlloc(key volumeUsageKey, needle *structs.Allocation) { + allocs := v.allocsForKey(key) + var newAllocs []string + for _, allocID := range allocs { + if allocID != needle.ID { + newAllocs = append(newAllocs, allocID) + } + } + + if len(newAllocs) == 0 { + delete(v.state, key) + } else { + v.state[key] = newAllocs + } +} + +func (v *volumeUsageTracker) Claim(alloc *structs.Allocation, volume *structs.CSIVolume, usage *UsageOptions) { + v.stateMu.Lock() + defer v.stateMu.Unlock() + + key := volumeUsageKey{volume: volume, usageOpts: *usage} + v.appendAlloc(key, alloc) +} + +// Free removes the allocation from the state list for the given alloc. If the +// alloc is the last allocation for the volume then it returns true. 
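Free's contract is the important part here: it reports true only once the last claim for a given (volume, usage) key is released, which is what lets UnmountVolume later in this patch decide when it is safe to unstage. A hedged, self-contained sketch of that semantic using a plain reference count rather than the tracker's actual keyed state:

    // Hedged sketch of the claim/free semantics: free returns true only when
    // the final outstanding claim for a key is released.
    package main

    import "fmt"

    type refTracker struct {
        claims map[string]int
    }

    func (t *refTracker) claim(key string) {
        t.claims[key]++
    }

    // free returns true when the last claim for key is released.
    func (t *refTracker) free(key string) bool {
        t.claims[key]--
        if t.claims[key] <= 0 {
            delete(t.claims, key)
            return true
        }
        return false
    }

    func main() {
        t := &refTracker{claims: map[string]int{}}
        t.claim("vol-rw")
        t.claim("vol-rw")
        fmt.Println(t.free("vol-rw")) // false: another alloc still holds a claim
        fmt.Println(t.free("vol-rw")) // true: last claim gone, safe to unstage
    }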
+func (v *volumeUsageTracker) Free(alloc *structs.Allocation, volume *structs.CSIVolume, usage *UsageOptions) bool { + v.stateMu.Lock() + defer v.stateMu.Unlock() + + key := volumeUsageKey{volume: volume, usageOpts: *usage} + v.removeAlloc(key, alloc) + allocs := v.allocsForKey(key) + return len(allocs) == 0 +} diff --git a/client/pluginmanager/csimanager/usage_tracker_test.go b/client/pluginmanager/csimanager/usage_tracker_test.go new file mode 100644 index 000000000..21545e96e --- /dev/null +++ b/client/pluginmanager/csimanager/usage_tracker_test.go @@ -0,0 +1,62 @@ +package csimanager + +import ( + "testing" + + "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/stretchr/testify/require" +) + +func TestUsageTracker(t *testing.T) { + mockAllocs := []*structs.Allocation{ + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + mock.Alloc(), + } + + cases := []struct { + Name string + + RegisterAllocs []*structs.Allocation + FreeAllocs []*structs.Allocation + + ExpectedResult bool + }{ + { + Name: "Register and deregister all allocs", + RegisterAllocs: mockAllocs, + FreeAllocs: mockAllocs, + ExpectedResult: true, + }, + { + Name: "Register all and deregister partial allocs", + RegisterAllocs: mockAllocs, + FreeAllocs: mockAllocs[0:3], + ExpectedResult: false, + }, + } + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + tracker := newVolumeUsageTracker() + + volume := &structs.CSIVolume{ + ID: "foo", + } + for _, alloc := range tc.RegisterAllocs { + tracker.Claim(alloc, volume, &UsageOptions{}) + } + + result := false + + for _, alloc := range tc.FreeAllocs { + result = tracker.Free(alloc, volume, &UsageOptions{}) + } + + require.Equal(t, tc.ExpectedResult, result, "Tracker State: %#v", tracker.state) + }) + } +} diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 728624a46..12393748b 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -34,8 +34,7 @@ type volumeManager struct { logger hclog.Logger plugin csi.CSIPlugin - volumes map[string]interface{} - // volumesMu sync.Mutex + usageTracker *volumeUsageTracker // mountRoot is the root of where plugin directories and mounts may be created // e.g /opt/nomad.d/statedir/csi/my-csi-plugin/ @@ -57,7 +56,7 @@ func newVolumeManager(logger hclog.Logger, plugin csi.CSIPlugin, rootDir, contai mountRoot: rootDir, containerMountPoint: containerRootDir, requiresStaging: requiresStaging, - volumes: make(map[string]interface{}), + usageTracker: newVolumeUsageTracker(), } } @@ -211,7 +210,14 @@ func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, } } - return v.publishVolume(ctx, vol, alloc, usage) + mountInfo, err := v.publishVolume(ctx, vol, alloc, usage) + if err != nil { + return nil, err + } + + v.usageTracker.Claim(alloc, vol, usage) + + return mountInfo, nil } // unstageVolume is the inverse operation of `stageVolume` and must be called @@ -283,11 +289,10 @@ func (v *volumeManager) UnmountVolume(ctx context.Context, vol *structs.CSIVolum return err } - if !v.requiresStaging { + canRelease := v.usageTracker.Free(alloc, vol, usage) + if !v.requiresStaging || !canRelease { return nil } - // TODO(GH-7029): Implement volume usage tracking and only unstage volumes - // when the last alloc stops using it. 
return v.unstageVolume(ctx, vol, usage) } From 9d4307a3efedb3f9f6fb96e3391ff08d41b13a38 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 17 Feb 2020 13:50:37 +0100 Subject: [PATCH 092/126] csi_endpoint: Provide AllocID in req, and return Volume Currently, the client has to ship an entire allocation to the server as part of performing a VolumeClaim, this has a few problems: Firstly, it means the client is sending significantly more data than is required (an allocation contains the entire contents of a Nomad job, alongside other irrelevant state) which has a non-zero (de)serialization cost. Secondly, because the allocation was never re-fetched from the state store, it means that we were potentially open to issues caused by stale state on a misbehaving or malicious client. The change removes both of those issues at the cost of a couple of more state store lookups, but they should be relatively cheap. We also now provide the CSIVolume in the response for a claim, so the client can perform a Claim without first going ahead and fetching all of the volumes. --- nomad/csi_endpoint.go | 33 ++++++++++++++++++----- nomad/csi_endpoint_test.go | 54 ++++++++++++++++++++++---------------- nomad/fsm.go | 17 +++++++++++- nomad/node_endpoint.go | 2 +- nomad/structs/csi.go | 11 +++++--- 5 files changed, 84 insertions(+), 33 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 6a0aa995b..7477e088b 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -334,7 +334,7 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return structs.ErrPermissionDenied } - // adds a PublishContext from the controller (if any) to the reply + // adds a Volume and PublishContext from the controller (if any) to the reply err = v.srv.controllerPublishVolume(args, reply) if err != nil { return err @@ -458,15 +458,36 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu // plugin associated with a volume, if any. func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, resp *structs.CSIVolumeClaimResponse) error { plug, vol, err := srv.volAndPluginLookup(req.VolumeID) - if plug == nil || vol == nil || err != nil { - return err // possibly nil if no controller required + if err != nil { + return err } - method := "ClientCSI.AttachVolume" + // Set the Response volume from the lookup + resp.Volume = vol + + // Validate the existence of the allocation, regardless of whether we need it + // now. + state := srv.fsm.State() + ws := memdb.NewWatchSet() + alloc, err := state.AllocByID(ws, req.AllocationID) + if err != nil { + return err + } + if alloc == nil { + return fmt.Errorf("%s: %s", structs.ErrUnknownAllocationPrefix, req.AllocationID) + } + + // if no plugin was returned then controller validation is not required. + // Here we can return nil. 
+ if plug == nil { + return nil + } + + method := "ClientCSI.CSIControllerAttachVolume" cReq := &cstructs.ClientCSIControllerAttachVolumeRequest{ PluginName: plug.ID, VolumeID: req.VolumeID, - NodeID: req.Allocation.NodeID, + NodeID: alloc.NodeID, AttachmentMode: vol.AttachmentMode, AccessMode: vol.AccessMode, ReadOnly: req.Claim == structs.CSIVolumeClaimRead, @@ -522,7 +543,7 @@ func (srv *Server) volAndPluginLookup(volID string) (*structs.CSIPlugin, *struct return nil, nil, fmt.Errorf("volume not found: %s", volID) } if !vol.ControllerRequired { - return nil, nil, nil + return nil, vol, nil } // note: we do this same lookup in CSIVolumeByID but then throw diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 6d784ad77..8c1905084 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -196,7 +196,6 @@ func TestCSIVolumeEndpoint_Register(t *testing.T) { func TestCSIVolumeEndpoint_Claim(t *testing.T) { t.Parallel() srv, shutdown := TestServer(t, func(c *Config) { - c.ACLEnabled = true c.NumSchedulers = 0 // Prevent automatic dequeue }) defer shutdown() @@ -204,24 +203,19 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { ns := structs.DefaultNamespace state := srv.fsm.State() - state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) - policy := mock.NamespacePolicy(ns, "", - []string{acl.NamespaceCapabilityCSICreateVolume, acl.NamespaceCapabilityCSIAccess}) - accessToken := mock.CreatePolicyAndToken(t, state, 1001, - acl.NamespaceCapabilityCSIAccess, policy) codec := rpcClient(t, srv) id0 := uuid.Generate() + alloc := mock.BatchAlloc() // Create an initial volume claim request; we expect it to fail // because there's no such volume yet. claimReq := &structs.CSIVolumeClaimRequest{ - VolumeID: id0, - Allocation: mock.BatchAlloc(), - Claim: structs.CSIVolumeClaimWrite, + VolumeID: id0, + AllocationID: alloc.ID, + Claim: structs.CSIVolumeClaimWrite, WriteRequest: structs.WriteRequest{ Region: "global", Namespace: ns, - AuthToken: accessToken.SecretID, }, } claimResp := &structs.CSIVolumeClaimResponse{} @@ -229,16 +223,18 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id0), "expected 'volume not found' error because volume hasn't yet been created") - // Create a client node, plugin, and volume + // Create a client node, plugin, alloc, and volume node := mock.Node() node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": {PluginID: "minnie", + "minnie": { + PluginID: "minnie", Healthy: true, NodeInfo: &structs.CSINodeInfo{}, }, } err = state.UpsertNode(1002, node) require.NoError(t, err) + vols := []*structs.CSIVolume{{ ID: id0, Namespace: "notTheNamespace", @@ -250,19 +246,23 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { }}, }} err = state.CSIVolumeRegister(1003, vols) + require.NoError(t, err) + + // Upsert the job and alloc + alloc.NodeID = node.ID + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) // Now our claim should succeed err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.NoError(t, err) // Verify the claim was set - getToken := mock.CreatePolicyAndToken(t, state, 1004, - acl.NamespaceCapabilityCSIAccess, policy) volGetReq := &structs.CSIVolumeGetRequest{ ID: id0, QueryOptions: structs.QueryOptions{ - Region: "global", - AuthToken: getToken.SecretID, + Region: "global", }, } volGetResp := 
&structs.CSIVolumeGetResponse{} @@ -273,7 +273,11 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { require.Len(t, volGetResp.Volume.WriteAllocs, 1) // Make another writer claim for a different alloc - claimReq.Allocation = mock.BatchAlloc() + alloc2 := mock.Alloc() + summary = mock.JobSummary(alloc2.JobID) + require.NoError(t, state.UpsertJobSummary(1005, summary)) + require.NoError(t, state.UpsertAllocs(1006, []*structs.Allocation{alloc2})) + claimReq.AllocationID = alloc2.ID err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) require.EqualError(t, err, "volume max claim reached", "expected 'volume max claim reached' because we only allow 1 writer") @@ -321,7 +325,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { codec := rpcClient(t, srv) id0 := uuid.Generate() - // Create a client node, plugin, and volume + // Create a client node, plugin, alloc, and volume node := mock.Node() node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version node.CSIControllerPlugins = map[string]*structs.CSIInfo{ @@ -344,11 +348,17 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { }} err = state.CSIVolumeRegister(1003, vols) + alloc := mock.BatchAlloc() + alloc.NodeID = node.ID + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + // Make the volume claim claimReq := &structs.CSIVolumeClaimRequest{ - VolumeID: id0, - Allocation: mock.BatchAlloc(), - Claim: structs.CSIVolumeClaimWrite, + VolumeID: id0, + AllocationID: alloc.ID, + Claim: structs.CSIVolumeClaimWrite, WriteRequest: structs.WriteRequest{ Region: "global", Namespace: ns, @@ -595,7 +605,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { // no controller plugin, vol, err = srv.volAndPluginLookup(id1) require.Nil(t, plugin) - require.Nil(t, vol) + require.NotNil(t, vol) require.NoError(t, err) // doesn't exist diff --git a/nomad/fsm.go b/nomad/fsm.go index 38fecb3a5..4433748c5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1157,7 +1157,22 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_claim"}, time.Now()) - if err := n.state.CSIVolumeClaim(index, req.VolumeID, req.Allocation, req.Claim); err != nil { + ws := memdb.NewWatchSet() + alloc, err := n.state.AllocByID(ws, req.AllocationID) + if err != nil { + n.logger.Error("AllocByID failed", "error", err) + return err + } + if alloc == nil { + n.logger.Error("AllocByID failed to find alloc", "alloc_id", req.AllocationID) + if err != nil { + return err + } + + return structs.ErrUnknownAllocationPrefix + } + + if err := n.state.CSIVolumeClaim(index, req.VolumeID, alloc, req.Claim); err != nil { n.logger.Error("CSIVolumeClaim failed", "error", err) return err } diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index 6639c0c0d..96acf2946 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -1180,7 +1180,7 @@ func (n *Node) unclaimVolumesForTerminalAllocs(args *structs.AllocUpdateRequest, req := &structs.CSIVolumeClaimRequest{ VolumeID: volume.Source, - Allocation: alloc, + AllocationID: alloc.ID, Claim: structs.CSIVolumeClaimRelease, WriteRequest: args.WriteRequest, } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 3114bf6e2..1ea75ae01 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -426,9 +426,9 @@ const ( ) type 
CSIVolumeClaimRequest struct { - VolumeID string - Allocation *Allocation - Claim CSIVolumeClaimMode + VolumeID string + AllocationID string + Claim CSIVolumeClaimMode WriteRequest } @@ -447,6 +447,11 @@ type CSIVolumeClaimResponse struct { // This field is OPTIONAL and when present MUST be passed to // `NodeStageVolume` or `NodePublishVolume` calls on the client PublishContext map[string]string + + // Volume contains the expanded CSIVolume for use on the client after a Claim + // has completed. + Volume *CSIVolume + QueryMeta } From 511b7775a6b5ac40bcc66e6298401d8cf12ce03c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 17 Feb 2020 13:57:25 +0100 Subject: [PATCH 093/126] csi: Claim CSI Volumes during csi_hook.Prerun This commit is the initial implementation of claiming volumes from the server and passes through any publishContext information as appropriate. There's nothing too fancy here. --- client/allocrunner/csi_hook.go | 55 ++++++++++++++++++- client/pluginmanager/csimanager/interface.go | 2 +- client/pluginmanager/csimanager/volume.go | 14 ++--- .../pluginmanager/csimanager/volume_test.go | 4 +- 4 files changed, 62 insertions(+), 13 deletions(-) diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index 409c4822a..8e6c05397 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -32,7 +32,7 @@ func (c *csiHook) Prerun() error { } ctx := context.TODO() - volumes, err := c.csiVolumesFromAlloc() + volumes, err := c.claimVolumesFromAlloc() if err != nil { return err } @@ -50,7 +50,7 @@ func (c *csiHook) Prerun() error { AccessMode: string(pair.volume.AccessMode), } - mountInfo, err := mounter.MountVolume(ctx, pair.volume, c.alloc, usageOpts) + mountInfo, err := mounter.MountVolume(ctx, pair.volume, c.alloc, usageOpts, pair.publishContext) if err != nil { return err } @@ -109,11 +109,60 @@ func (c *csiHook) Postrun() error { type volumeAndRequest struct { volume *structs.CSIVolume request *structs.VolumeRequest + + // When volumeAndRequest was returned from a volume claim, this field will be + // populated for plugins that require it. + publishContext map[string]string +} + +// claimVolumesFromAlloc is used by the pre-run hook to fetch all of the volume +// metadata and claim it for use by this alloc/node at the same time. +func (c *csiHook) claimVolumesFromAlloc() (map[string]*volumeAndRequest, error) { + result := make(map[string]*volumeAndRequest) + tg := c.alloc.Job.LookupTaskGroup(c.alloc.TaskGroup) + + // Initially, populate the result map with all of the requests + for alias, volumeRequest := range tg.Volumes { + if volumeRequest.Type == structs.VolumeTypeCSI { + result[alias] = &volumeAndRequest{request: volumeRequest} + } + } + + // Iterate over the result map and upsert the volume field as each volume gets + // claimed by the server. 
+ for alias, pair := range result { + claimType := structs.CSIVolumeClaimWrite + if pair.request.ReadOnly { + claimType = structs.CSIVolumeClaimRead + } + + req := &structs.CSIVolumeClaimRequest{ + VolumeID: pair.request.Source, + AllocationID: c.alloc.ID, + Claim: claimType, + } + req.Region = c.alloc.Job.Region + + var resp structs.CSIVolumeClaimResponse + if err := c.rpcClient.RPC("CSIVolume.Claim", req, &resp); err != nil { + return nil, err + } + + if resp.Volume == nil { + return nil, fmt.Errorf("Unexpected nil volume returned for ID: %v", pair.request.Source) + } + + result[alias].volume = resp.Volume + result[alias].publishContext = resp.PublishContext + } + + return result, nil } // csiVolumesFromAlloc finds all the CSI Volume requests from the allocation's // task group and then fetches them from the Nomad Server, before returning -// them in the form of map[RequestedAlias]*structs.CSIVolume. +// them in the form of map[RequestedAlias]*volumeAndReqest. This allows us to +// thread the request context through to determine usage options for each volume. // // If any volume fails to validate then we return an error. func (c *csiHook) csiVolumesFromAlloc() (map[string]*volumeAndRequest, error) { diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index 96cc05940..f2458fd11 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -45,7 +45,7 @@ func (u *UsageOptions) ToFS() string { } type VolumeMounter interface { - MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions) (*MountInfo, error) + MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions, publishContext map[string]string) (*MountInfo, error) UnmountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usageOpts *UsageOptions) error } diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 12393748b..48b116e97 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -120,7 +120,7 @@ func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Al // stageVolume prepares a volume for use by allocations. When a plugin exposes // the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a // given usage mode before the volume can be NodePublish-ed. 
-func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions) error { +func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, usage *UsageOptions, publishContext map[string]string) error { logger := hclog.FromContext(ctx) logger.Trace("Preparing volume staging environment") hostStagingPath, isMount, err := v.ensureStagingDir(vol, usage) @@ -148,7 +148,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, // https://github.com/container-storage-interface/spec/blob/4731db0e0bc53238b93850f43ab05d9355df0fd9/spec.md#nodestagevolume-errors return v.plugin.NodeStageVolume(ctx, vol.ID, - nil, /* TODO: Get publishContext from Server */ + publishContext, pluginStagingPath, capability, grpc_retry.WithPerRetryTimeout(DefaultMountActionTimeout), @@ -157,7 +157,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, ) } -func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (*MountInfo, error) { +func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) { logger := hclog.FromContext(ctx) var pluginStagingPath string if v.requiresStaging { @@ -182,7 +182,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ VolumeID: vol.ID, - PublishContext: nil, // TODO: get publishcontext from server + PublishContext: publishContext, StagingTargetPath: pluginStagingPath, TargetPath: pluginTargetPath, VolumeCapability: capabilities, @@ -200,17 +200,17 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum // configuration for the provided allocation. // // TODO: Validate remote volume attachment and implement. 
-func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions) (*MountInfo, error) { +func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) { logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) if v.requiresStaging { - if err := v.stageVolume(ctx, vol, usage); err != nil { + if err := v.stageVolume(ctx, vol, usage, publishContext); err != nil { return nil, err } } - mountInfo, err := v.publishVolume(ctx, vol, alloc, usage) + mountInfo, err := v.publishVolume(ctx, vol, alloc, usage, publishContext) if err != nil { return nil, err } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 689bd86a7..148c55249 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -177,7 +177,7 @@ func TestVolumeManager_stageVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions) + err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions, nil) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) @@ -294,7 +294,7 @@ func TestVolumeManager_publishVolume(t *testing.T) { manager := newVolumeManager(testlog.HCLogger(t), csiFake, tmpPath, tmpPath, true) ctx := context.Background() - _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions) + _, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions, nil) if tc.ExpectedErr != nil { require.EqualError(t, err, tc.ExpectedErr.Error()) From 1b70fb1398e45ac3466e0200ec9d2a45b839b4ae Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Mon, 17 Feb 2020 14:18:57 +0100 Subject: [PATCH 094/126] hook resources: Init with empty resources during setup --- client/allocrunner/alloc_runner_hooks.go | 1 + 1 file changed, 1 insertion(+) diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index 73c5f35eb..af9e6b61c 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -137,6 +137,7 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { // create hook resource setting shim hrs := &allocHookResourceSetter{ar: ar} + hrs.SetAllocHookResources(&cstructs.AllocHookResources{}) // build the network manager nm, err := newNetworkManager(ar.Alloc(), ar.driverManager) From cd5b4923d001f82d77602eaac10c2f68acfe7f73 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 19 Feb 2020 12:32:01 +0100 Subject: [PATCH 095/126] api: Register CSIPlugin before registering a Volume --- api/csi_test.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/api/csi_test.go b/api/csi_test.go index 3557bd0ed..03a64f039 100644 --- a/api/csi_test.go +++ b/api/csi_test.go @@ -31,12 +31,24 @@ func TestCSIVolumes_CRUD(t *testing.T) { AuthToken: root.SecretID, } + // Register a plugin job + j := c.Jobs() + job := testJob() + job.Namespace = stringToPtr("default") + job.TaskGroups[0].Tasks[0].CSIPluginConfig = &TaskCSIPluginConfig{ + ID: "foo", + Type: "monolith", + MountDir: "/not-empty", + } + _, _, err = j.Register(job, wpts) + require.NoError(t, err) + // Register a volume id := 
"DEADBEEF-31B5-8F78-7986-DD404FDA0CD1" _, err = v.Register(&CSIVolume{ ID: id, Namespace: "default", - PluginID: "adam", + PluginID: "foo", AccessMode: CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: CSIVolumeAttachmentModeFilesystem, Topologies: []*CSITopology{{Segments: map[string]string{"foo": "bar"}}}, From 6fc7f7779d4f1a30ac7e7f12735db4c7aab13d50 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Wed, 19 Feb 2020 12:46:11 +0100 Subject: [PATCH 096/126] csimanager/volume: Update MountVolume docstring --- client/pluginmanager/csimanager/volume.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 48b116e97..49cec1cb9 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -198,8 +198,10 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum // MountVolume performs the steps required for using a given volume // configuration for the provided allocation. -// -// TODO: Validate remote volume attachment and implement. +// It is passed the publishContext from remote attachment, and specific usage +// modes from the CSI Hook. +// It then uses this state to stage and publish the volume as required for use +// by the given allocation. func (v *volumeManager) MountVolume(ctx context.Context, vol *structs.CSIVolume, alloc *structs.Allocation, usage *UsageOptions, publishContext map[string]string) (*MountInfo, error) { logger := v.logger.With("volume_id", vol.ID, "alloc_id", alloc.ID) ctx = hclog.WithContext(ctx, logger) From 8bc5641438fface4c120894d1c18ae90f057e361 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 19 Feb 2020 09:05:33 -0500 Subject: [PATCH 097/126] csi: volume claim garbage collection (#7125) When an alloc is marked terminal (and after node unstage/unpublish have been called), the client syncs the terminal alloc state with the server via `Node.UpdateAlloc RPC`. For each job that has a terminal alloc, the `Node.UpdateAlloc` RPC handler at the server will emit an eval for a new core job to garbage collect CSI volume claims. When this eval is handled on the core scheduler, it will call a `volumeReap` method to release the claims for all terminal allocs on the job. The volume reap will issue a `ControllerUnpublishVolume` RPC for any node that has no alloc claiming the volume. Once this returns (or is skipped), the volume reap will send a new `CSIVolume.Claim` RPC that releases the volume claim for that allocation in the state store, making it available for scheduling again. This same `volumeReap` method will be called from the core job GC, which gives us a second chance to reclaim volumes during GC if there were controller RPC failures. --- nomad/config.go | 5 -- nomad/core_sched.go | 137 +++++++++++++++++++++++++++++++++-- nomad/core_sched_test.go | 141 ++++++++++++++++++++++++++++-------- nomad/csi_endpoint.go | 1 + nomad/leader.go | 6 -- nomad/node_endpoint.go | 61 ++++++++-------- nomad/node_endpoint_test.go | 40 ++++------ nomad/state/state_store.go | 8 -- nomad/structs/csi.go | 18 ----- nomad/structs/structs.go | 8 +- 10 files changed, 291 insertions(+), 134 deletions(-) diff --git a/nomad/config.go b/nomad/config.go index 2aced4830..8a0fb7a18 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -191,10 +191,6 @@ type Config struct { // for GC. This gives users some time to view terminal deployments. 
DeploymentGCThreshold time.Duration - // CSIVolumePublicationGCInterval is how often we dispatch a job to GC - // unclaimed CSI volume publications. - CSIVolumePublicationGCInterval time.Duration - // EvalNackTimeout controls how long we allow a sub-scheduler to // work on an evaluation before we consider it failed and Nack it. // This allows that evaluation to be handed to another sub-scheduler @@ -381,7 +377,6 @@ func DefaultConfig() *Config { NodeGCThreshold: 24 * time.Hour, DeploymentGCInterval: 5 * time.Minute, DeploymentGCThreshold: 1 * time.Hour, - CSIVolumePublicationGCInterval: 60 * time.Second, EvalNackTimeout: 60 * time.Second, EvalDeliveryLimit: 3, EvalNackInitialReenqueueDelay: 1 * time.Second, diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 351bf25a7..601e66fe4 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -3,10 +3,12 @@ package nomad import ( "fmt" "math" + "strings" "time" log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" + multierror "github.com/hashicorp/go-multierror" version "github.com/hashicorp/go-version" "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" @@ -41,7 +43,8 @@ func NewCoreScheduler(srv *Server, snap *state.StateSnapshot) scheduler.Schedule // Process is used to implement the scheduler.Scheduler interface func (c *CoreScheduler) Process(eval *structs.Evaluation) error { - switch eval.JobID { + job := strings.Split(eval.JobID, ":") // extra data can be smuggled in w/ JobID + switch job[0] { case structs.CoreJobEvalGC: return c.evalGC(eval) case structs.CoreJobNodeGC: @@ -50,8 +53,8 @@ func (c *CoreScheduler) Process(eval *structs.Evaluation) error { return c.jobGC(eval) case structs.CoreJobDeploymentGC: return c.deploymentGC(eval) - case structs.CoreJobCSIVolumePublicationGC: - return c.csiVolumePublicationGC(eval) + case structs.CoreJobCSIVolumeClaimGC: + return c.csiVolumeClaimGC(eval) case structs.CoreJobForceGC: return c.forceGC(eval) default: @@ -143,6 +146,7 @@ OUTER: gcAlloc = append(gcAlloc, jobAlloc...) gcEval = append(gcEval, jobEval...) } + } // Fast-path the nothing case @@ -152,6 +156,11 @@ OUTER: c.logger.Debug("job GC found eligible objects", "jobs", len(gcJob), "evals", len(gcEval), "allocs", len(gcAlloc)) + // Clean up any outstanding volume claims + if err := c.volumeClaimReap(gcJob, eval.LeaderACL); err != nil { + return err + } + // Reap the evals and allocs if err := c.evalReap(gcEval, gcAlloc); err != nil { return err @@ -706,9 +715,123 @@ func allocGCEligible(a *structs.Allocation, job *structs.Job, gcTime time.Time, return timeDiff > interval.Nanoseconds() } -// csiVolumeGC is used to garbage collect CSI volume publications -func (c *CoreScheduler) csiVolumePublicationGC(eval *structs.Evaluation) error { - // TODO: implement me! - c.logger.Trace("garbage collecting unclaimed CSI volume publications") +// csiVolumeClaimGC is used to garbage collect CSI volume claims +func (c *CoreScheduler) csiVolumeClaimGC(eval *structs.Evaluation) error { + c.logger.Trace("garbage collecting unclaimed CSI volume claims") + + // JobID smuggled in with the eval's own JobID + var jobID string + evalJobID := strings.Split(eval.JobID, ":") + if len(evalJobID) != 2 { + c.logger.Error("volume gc called without jobID") + return nil + } + + jobID = evalJobID[1] + job, err := c.srv.State().JobByID(nil, eval.Namespace, jobID) + if err != nil || job == nil { + c.logger.Trace( + "cannot find job to perform volume claim GC. 
it may have been garbage collected", + "job", jobID) + return nil + } + c.volumeClaimReap([]*structs.Job{job}, eval.LeaderACL) return nil } + +// volumeClaimReap contacts the leader and releases volume claims from +// terminal allocs +func (c *CoreScheduler) volumeClaimReap(jobs []*structs.Job, leaderACL string) error { + ws := memdb.NewWatchSet() + var result *multierror.Error + + for _, job := range jobs { + c.logger.Trace("garbage collecting unclaimed CSI volume claims for job", "job", job.ID) + for _, taskGroup := range job.TaskGroups { + for _, tgVolume := range taskGroup.Volumes { + if tgVolume.Type != structs.VolumeTypeCSI { + continue // filter to just CSI volumes + } + volID := tgVolume.Source + vol, err := c.srv.State().CSIVolumeByID(ws, volID) + if err != nil { + result = multierror.Append(result, err) + continue + } + if vol == nil { + c.logger.Trace("cannot find volume to be GC'd. it may have been deregistered", + "volume", volID) + continue + } + vol, err = c.srv.State().CSIVolumeDenormalize(ws, vol) + if err != nil { + result = multierror.Append(result, err) + continue + } + + gcAllocs := []string{} // alloc IDs + claimedNodes := map[string]struct{}{} + knownNodes := []string{} + + collectFunc := func(allocs map[string]*structs.Allocation) { + for _, alloc := range allocs { + // we call denormalize on the volume above to populate + // Allocation pointers. But the alloc might have been + // garbage collected concurrently, so if the alloc is + // still nil we can safely skip it. + if alloc == nil { + continue + } + knownNodes = append(knownNodes, alloc.NodeID) + if !alloc.Terminated() { + // if there are any unterminated allocs, we + // don't want to unpublish the volume, just + // release the alloc's claim + claimedNodes[alloc.NodeID] = struct{}{} + continue + } + gcAllocs = append(gcAllocs, alloc.ID) + } + } + + collectFunc(vol.WriteAllocs) + collectFunc(vol.ReadAllocs) + + req := &structs.CSIVolumeClaimRequest{ + VolumeID: volID, + AllocationID: "", // controller unpublish never uses this field + Claim: structs.CSIVolumeClaimRelease, + WriteRequest: structs.WriteRequest{ + Region: job.Region, + Namespace: job.Namespace, + AuthToken: leaderACL, + }, + } + + // we only emit the controller unpublish if no other allocs + // on the node need it, but we also only want to make this + // call at most once per node + for _, node := range knownNodes { + if _, isClaimed := claimedNodes[node]; isClaimed { + continue + } + err = c.srv.controllerUnpublishVolume(req, node) + if err != nil { + result = multierror.Append(result, err) + continue + } + } + + for _, allocID := range gcAllocs { + req.AllocationID = allocID + err = c.srv.RPC("CSIVolume.Claim", req, &structs.CSIVolumeClaimResponse{}) + if err != nil { + c.logger.Error("volume claim release failed", "error", err) + result = multierror.Append(result, err) + } + } + } + } + } + return result.ErrorOrNil() +} diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 92f12b911..99e534565 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -1836,37 +1836,6 @@ func TestCoreScheduler_DeploymentGC_Force(t *testing.T) { } } -// TODO: this is an empty test until CoreScheduler.csiVolumePublicationGC is implemented -func TestCoreScheduler_CSIVolumePublicationGC(t *testing.T) { - t.Parallel() - - s1, cleanupS1 := TestServer(t, nil) - defer cleanupS1() - testutil.WaitForLeader(t, s1.RPC) - assert := assert.New(t) - - // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 - 
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) - - // TODO: insert volumes for nodes - state := s1.fsm.State() - - // Update the time tables to make this work - tt := s1.fsm.TimeTable() - tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.CSIVolumePublicationGCInterval)) - - // Create a core scheduler - snap, err := state.Snapshot() - assert.Nil(err, "Snapshot") - core := NewCoreScheduler(s1, snap) - - // Attempt the GC - gc := s1.coreJobEval(structs.CoreJobCSIVolumePublicationGC, 2000) - assert.Nil(core.Process(gc), "Process GC") - - // TODO: assert state is cleaned up -} - func TestCoreScheduler_PartitionEvalReap(t *testing.T) { t.Parallel() @@ -2224,3 +2193,113 @@ func TestAllocation_GCEligible(t *testing.T) { alloc.ClientStatus = structs.AllocClientStatusComplete require.True(allocGCEligible(alloc, nil, time.Now(), 1000)) } + +func TestCSI_GCVolumeClaims(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + // codec := rpcClient(t, srv) + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + // Create a client node, plugin, and volume + node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": {PluginID: "csi-plugin-example", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + err := state.UpsertNode(99, node) + require.NoError(t, err) + volId0 := uuid.Generate() + vols := []*structs.CSIVolume{{ + ID: volId0, + Namespace: "notTheNamespace", + PluginID: "csi-plugin-example", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + err = state.CSIVolumeRegister(100, vols) + require.NoError(t, err) + vol, err := state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) + + // Create a job with 2 allocations + job := mock.Job() + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + "_": { + Name: "someVolume", + Type: structs.VolumeTypeCSI, + Source: volId0, + ReadOnly: false, + }, + } + err = state.UpsertJob(101, job) + require.NoError(t, err) + + alloc1 := mock.Alloc() + alloc1.JobID = job.ID + alloc1.NodeID = node.ID + err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID)) + require.NoError(t, err) + alloc1.TaskGroup = job.TaskGroups[0].Name + + alloc2 := mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) + err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update the 1st alloc as failed/terminated + alloc1.ClientStatus = structs.AllocClientStatusFailed + err = state.UpdateAllocsFromClient(107, []*structs.Allocation{alloc1}) + require.NoError(t, err) + + // Create the GC eval we'd get from Node.UpdateAlloc + now := time.Now().UTC() + eval := 
&structs.Evaluation{ + ID: uuid.Generate(), + Namespace: job.Namespace, + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerAllocStop, + JobID: structs.CoreJobCSIVolumeClaimGC + ":" + job.ID, + LeaderACL: srv.getLeaderAcl(), + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), + } + + // Process the eval + snap, err := state.Snapshot() + require.NoError(t, err) + core := NewCoreScheduler(srv, snap) + err = core.Process(eval) + require.NoError(t, err) + + // Verify the claim was released + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 0) +} diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 7477e088b..4d7b9919a 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -511,6 +511,7 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r // controllerUnpublishVolume sends an unpublish request to the CSI // controller plugin associated with a volume, if any. +// TODO: the only caller of this won't have an alloc pointer handy, should it be its own request arg type? func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, nodeID string) error { plug, vol, err := srv.volAndPluginLookup(req.VolumeID) if plug == nil || vol == nil || err != nil { diff --git a/nomad/leader.go b/nomad/leader.go index 98529fcc1..b43d4abd2 100644 --- a/nomad/leader.go +++ b/nomad/leader.go @@ -519,8 +519,6 @@ func (s *Server) schedulePeriodic(stopCh chan struct{}) { defer jobGC.Stop() deploymentGC := time.NewTicker(s.config.DeploymentGCInterval) defer deploymentGC.Stop() - csiVolumePublicationGC := time.NewTicker(s.config.CSIVolumePublicationGCInterval) - defer csiVolumePublicationGC.Stop() // getLatest grabs the latest index from the state store. It returns true if // the index was retrieved successfully. @@ -553,10 +551,6 @@ func (s *Server) schedulePeriodic(stopCh chan struct{}) { if index, ok := getLatest(); ok { s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobDeploymentGC, index)) } - case <-csiVolumePublicationGC.C: - if index, ok := getLatest(); ok { - s.evalBroker.Enqueue(s.coreJobEval(structs.CoreJobCSIVolumePublicationGC, index)) - } case <-stopCh: return } diff --git a/nomad/node_endpoint.go b/nomad/node_endpoint.go index 96acf2946..d8348a79e 100644 --- a/nomad/node_endpoint.go +++ b/nomad/node_endpoint.go @@ -1081,6 +1081,10 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene now := time.Now() var evals []*structs.Evaluation + // A set of de-duplicated IDs for jobs that need volume claim GC. + // Later we'll create a gc eval for each job. + jobsWithVolumeGCs := make(map[string]*structs.Job) + for _, allocToUpdate := range args.Alloc { allocToUpdate.ModifyTime = now.UTC().UnixNano() @@ -1108,11 +1112,12 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene continue } - err = n.unclaimVolumesForTerminalAllocs(args, alloc, taskGroup) - if err != nil { - n.logger.Error("UpdateAlloc unable to release CSI volume", - "alloc", alloc.ID, "error", err) - continue + // If the terminal alloc has CSI volumes, add its job to the list + // of jobs we're going to call volume claim GC on. 
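The eval constructed just below reuses the CoreJobCSIVolumeClaimGC core job name with the target job's ID appended after a colon, and CoreScheduler.Process splits that same string back apart (see the strings.Split in core_sched.go earlier in this series). A small, hedged sketch of the convention, using a local stand-in for the constant:

    // Hedged sketch of the JobID "smuggling" convention used for claim GC evals.
    package main

    import (
        "fmt"
        "strings"
    )

    // stand-in for structs.CoreJobCSIVolumeClaimGC
    const coreJobCSIVolumeClaimGC = "csi-volume-claim-gc"

    func main() {
        // Node.UpdateAlloc side: encode the job ID into the eval's JobID.
        evalJobID := coreJobCSIVolumeClaimGC + ":" + "example-job"

        // Core scheduler side: recover the core job name and the job ID.
        parts := strings.Split(evalJobID, ":")
        fmt.Println(parts[0]) // csi-volume-claim-gc
        fmt.Println(parts[1]) // example-job
    }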
+ for _, vol := range taskGroup.Volumes { + if vol.Type == structs.VolumeTypeCSI { + jobsWithVolumeGCs[job.ID] = job + } } // Add an evaluation if this is a failed alloc that is eligible for rescheduling @@ -1132,6 +1137,26 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene } } + // Add an evaluation for garbage collecting the the CSI volume claims + // of jobs with terminal allocs + for _, job := range jobsWithVolumeGCs { + // we have to build this eval by hand rather than calling srv.CoreJob + // here because we need to use the alloc's namespace + eval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: job.Namespace, + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerAllocStop, + JobID: structs.CoreJobCSIVolumeClaimGC + ":" + job.ID, + LeaderACL: n.srv.getLeaderAcl(), + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), + } + evals = append(evals, eval) + } + // Add this to the batch n.updatesLock.Lock() n.updates = append(n.updates, args.Alloc...) @@ -1170,32 +1195,6 @@ func (n *Node) UpdateAlloc(args *structs.AllocUpdateRequest, reply *structs.Gene return nil } -// unclaimVolumesForTerminalAllocs unpublishes and unclaims CSI volumes -// that belong to the alloc if it is terminal. -func (n *Node) unclaimVolumesForTerminalAllocs(args *structs.AllocUpdateRequest, alloc *structs.Allocation, taskGroup *structs.TaskGroup) error { - for _, volume := range taskGroup.Volumes { - - // TODO(tgross): we also need to call ControllerUnpublishVolume CSI RPC here - // but the server-side CSI client + routing hasn't been implemented yet - - req := &structs.CSIVolumeClaimRequest{ - VolumeID: volume.Source, - AllocationID: alloc.ID, - Claim: structs.CSIVolumeClaimRelease, - WriteRequest: args.WriteRequest, - } - - resp, _, err := n.srv.raftApply(structs.CSIVolumeClaimRequestType, req) - if err != nil { - return err - } - if respErr, ok := resp.(error); ok { - return respErr - } - } - return nil -} - // batchUpdate is used to update all the allocations func (n *Node) batchUpdate(future *structs.BatchFuture, updates []*structs.Allocation, evals []*structs.Evaluation) { // Group pending evals by jobID to prevent creating unnecessary evals diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 7f5f04882..478ea97ca 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2322,21 +2322,18 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { state := srv.fsm.State() ws := memdb.NewWatchSet() - // Create a client node with a plugin + // Create a client node, plugin, and volume node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version node.CSINodePlugins = map[string]*structs.CSIInfo{ "csi-plugin-example": {PluginID: "csi-plugin-example", - Healthy: true, - NodeInfo: &structs.CSINodeInfo{}, + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + ControllerInfo: &structs.CSIControllerInfo{}, }, } - plugin := structs.NewCSIPlugin("csi-plugin-example", 1) - plugin.ControllerRequired = false - plugin.AddPlugin(node.ID, &structs.CSIInfo{}) err := state.UpsertNode(99, node) require.NoError(t, err) - - // Create the volume for the plugin volId0 := uuid.Generate() vols := []*structs.CSIVolume{{ ID: volId0, @@ -2344,19 +2341,20 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { PluginID: "csi-plugin-example", AccessMode: 
structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, - Topologies: []*structs.CSITopology{{ - Segments: map[string]string{"foo": "bar"}, - }}, }} - err = state.CSIVolumeRegister(4, vols) + err = state.CSIVolumeRegister(100, vols) require.NoError(t, err) + vol, err := state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) // Create a job with 2 allocations job := mock.Job() job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ "_": { Name: "someVolume", - Type: "", + Type: structs.VolumeTypeCSI, Source: volId0, ReadOnly: false, }, @@ -2381,12 +2379,6 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) require.NoError(t, err) - // Verify no claims are set - vol, err := state.CSIVolumeByID(ws, volId0) - require.NoError(t, err) - require.Len(t, vol.ReadAllocs, 0) - require.Len(t, vol.WriteAllocs, 0) - // Claim the volumes and verify the claims were set err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) require.NoError(t, err) @@ -2411,11 +2403,11 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { require.NoError(t, err) require.Equal(t, structs.AllocClientStatusFailed, out.ClientStatus) - // Verify the claim was released - vol, err = state.CSIVolumeByID(ws, volId0) - require.NoError(t, err) - require.Len(t, vol.ReadAllocs, 1) - require.Len(t, vol.WriteAllocs, 0) + // Verify the eval for the claim GC was emitted + // Lookup the evaluations + eval, err := state.EvalsByJob(ws, job.Namespace, structs.CoreJobCSIVolumeClaimGC+":"+job.ID) + require.NotNil(t, eval) + require.Nil(t, err) } func TestClientEndpoint_CreateNodeEvals(t *testing.T) { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 96cc3ae6a..4980382a6 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1929,14 +1929,6 @@ func (s *StateStore) CSIVolumeDenormalize(ws memdb.WatchSet, vol *structs.CSIVol vol.WriteAllocs[id] = a } - for id := range vol.PastAllocs { - a, err := s.AllocByID(ws, id) - if err != nil { - return nil, err - } - vol.PastAllocs[id] = a - } - return vol, nil } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 1ea75ae01..cc0bbaee7 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -146,7 +146,6 @@ type CSIVolume struct { // Allocations, tracking claim status ReadAllocs map[string]*Allocation WriteAllocs map[string]*Allocation - PastAllocs map[string]*Allocation // Healthy is true if all the denormalized plugin health fields are true, and the // volume has not been marked for garbage collection @@ -203,7 +202,6 @@ func (v *CSIVolume) newStructs() { v.ReadAllocs = map[string]*Allocation{} v.WriteAllocs = map[string]*Allocation{} - v.PastAllocs = map[string]*Allocation{} } func (v *CSIVolume) Stub() *CSIVolListStub { @@ -265,10 +263,6 @@ func (v *CSIVolume) Copy() *CSIVolume { out.WriteAllocs[k] = v } - for k, v := range v.PastAllocs { - out.PastAllocs[k] = v - } - return out } @@ -294,7 +288,6 @@ func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { // pointer. We'll get it from the db in denormalize. v.ReadAllocs[alloc.ID] = nil delete(v.WriteAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) return true } @@ -307,7 +300,6 @@ func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { // pointer. We'll get it from the db in denormalize. 
v.WriteAllocs[alloc.ID] = nil delete(v.ReadAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) return true } @@ -315,19 +307,9 @@ func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { func (v *CSIVolume) ClaimRelease(alloc *Allocation) bool { delete(v.ReadAllocs, alloc.ID) delete(v.WriteAllocs, alloc.ID) - // Allocations are copy on write, so we want to keep the id but don't need the - // pointer. We'll get it from the db in denormalize. - v.PastAllocs[alloc.ID] = nil return true } -// GCAlloc is called on Allocation gc, by following the alloc's pointer back to the volume -func (v *CSIVolume) GCAlloc(alloc *Allocation) { - delete(v.ReadAllocs, alloc.ID) - delete(v.WriteAllocs, alloc.ID) - delete(v.PastAllocs, alloc.ID) -} - // Equality by value func (v *CSIVolume) Equal(o *CSIVolume) bool { if v == nil || o == nil { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index d993cc7c9..7a758605a 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -8694,10 +8694,10 @@ const ( // check if they are terminal. If so, we delete these out of the system. CoreJobDeploymentGC = "deployment-gc" - // CoreJobCSIVolumePublicationGC is use for the garbage collection of CSI - // volume publications. We periodically scan volumes to see if no allocs are - // claiming them. If so, we unpublish the volume. - CoreJobCSIVolumePublicationGC = "csi-volume-publication-gc" + // CoreJobCSIVolumeClaimGC is use for the garbage collection of CSI + // volume claims. We periodically scan volumes to see if no allocs are + // claiming them. If so, we unclaim the volume. + CoreJobCSIVolumeClaimGC = "csi-volume-claim-gc" // CoreJobForceGC is used to force garbage collection of all GCable objects. CoreJobForceGC = "force-gc" From 5b05baf9f6c9e06577e6b2aa08a3d4d2811bc51c Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 21 Feb 2020 10:37:12 +0100 Subject: [PATCH 098/126] csi: Add /dev mounts to CSI Plugins CSI Plugins that manage devices need not just access to the CSI directory, but also to manage devices inside `/dev`. This commit introduces a `/dev:/dev` mount to the container so that they may do so. 
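For illustration, the shape of the new mount is sketched below. This is a minimal, self-contained stand-in (the `MountConfig` struct here is a reduced copy of the fields the hook sets on `drivers.MountConfig`, and the host path for the existing CSI config mount is invented for the example); the real hook change follows in the diff.

```go
package main

import "fmt"

// MountConfig is a stand-in for the fields of drivers.MountConfig that the
// plugin supervisor hook populates; it is simplified for this sketch.
type MountConfig struct {
	TaskPath        string
	HostPath        string
	Readonly        bool
	PropagationMode string
}

func main() {
	// Existing CSI plugin directory mount: bidirectional propagation so that
	// mounts the plugin creates under its staging/socket directory are visible
	// on the host. (Paths are illustrative, not the hook's real values.)
	configMount := &MountConfig{
		TaskPath:        "/csi",
		HostPath:        "/var/nomad/client/csi/example-plugin",
		Readonly:        false,
		PropagationMode: "bidirectional",
	}

	// New /dev mount: exposes the host device tree so device-managing plugins
	// can create and attach devices. Note that no propagation mode is set.
	devMount := &MountConfig{
		TaskPath: "/dev",
		HostPath: "/dev",
		Readonly: false,
	}

	for _, m := range []*MountConfig{configMount, devMount} {
		fmt.Printf("%s -> %s (ro=%v, propagation=%q)\n",
			m.HostPath, m.TaskPath, m.Readonly, m.PropagationMode)
	}
}
```

Both mounts are passed through `ensureMountpointInserted`, presumably so repeated prestart runs do not add duplicate entries, as the hunk below shows.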
--- client/allocrunner/taskrunner/plugin_supervisor_hook.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index e66cb95e4..dde88586f 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -113,8 +113,15 @@ func (h *csiPluginSupervisorHook) Prestart(ctx context.Context, Readonly: false, PropagationMode: "bidirectional", } + devMount := &drivers.MountConfig{ + TaskPath: "/dev", + HostPath: "/dev", + Readonly: false, + } mounts := ensureMountpointInserted(h.runner.hookResources.getMounts(), configMount) + mounts = ensureMountpointInserted(mounts, devMount) + h.runner.hookResources.setMounts(mounts) resp.Done = true From d5e255f97af46d2e0bd22643dc768200848df020 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 21 Feb 2020 10:48:19 +0100 Subject: [PATCH 099/126] client: Rename ClientCSI -> CSIController --- ..._endpoint.go => csi_controller_endpoint.go} | 18 +++++++++--------- ...test.go => csi_controller_endpoint_test.go} | 8 ++++---- client/rpc.go | 14 +++++++------- 3 files changed, 20 insertions(+), 20 deletions(-) rename client/{client_csi_endpoint.go => csi_controller_endpoint.go} (80%) rename client/{client_csi_endpoint_test.go => csi_controller_endpoint_test.go} (96%) diff --git a/client/client_csi_endpoint.go b/client/csi_controller_endpoint.go similarity index 80% rename from client/client_csi_endpoint.go rename to client/csi_controller_endpoint.go index 0922f0dc6..5aaf66b6e 100644 --- a/client/client_csi_endpoint.go +++ b/client/csi_controller_endpoint.go @@ -13,9 +13,9 @@ import ( "github.com/hashicorp/nomad/plugins/csi" ) -// ClientCSI endpoint is used for interacting with CSI plugins on a client. +// CSIController endpoint is used for interacting with CSI plugins on a client. // TODO: Submit metrics with labels to allow debugging per plugin perf problems. -type ClientCSI struct { +type CSIController struct { c *Client } @@ -30,10 +30,10 @@ var ( ErrPluginTypeError = errors.New("CSI Plugin loaded incorrectly") ) -// CSIControllerValidateVolume is used during volume registration to validate +// ValidateVolume is used during volume registration to validate // that a volume exists and that the capabilities it was registered with are // supported by the CSI Plugin and external volume configuration. -func (c *ClientCSI) CSIControllerValidateVolume(req *structs.ClientCSIControllerValidateVolumeRequest, resp *structs.ClientCSIControllerValidateVolumeResponse) error { +func (c *CSIController) ValidateVolume(req *structs.ClientCSIControllerValidateVolumeRequest, resp *structs.ClientCSIControllerValidateVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "validate_volume"}, time.Now()) if req.VolumeID == "" { @@ -60,7 +60,7 @@ func (c *ClientCSI) CSIControllerValidateVolume(req *structs.ClientCSIController return plugin.ControllerValidateCapabilties(ctx, req.VolumeID, caps) } -// CSIControllerAttachVolume is used to attach a volume from a CSI Cluster to +// AttachVolume is used to attach a volume from a CSI Cluster to // the storage node provided in the request. // // The controller attachment flow currently works as follows: @@ -68,7 +68,7 @@ func (c *ClientCSI) CSIControllerValidateVolume(req *structs.ClientCSIController // 2. 
Call ControllerPublishVolume on the CSI Plugin to trigger a remote attachment // // In the future this may be expanded to request dynamic secrets for attachement. -func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { +func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) plugin, err := c.findControllerPlugin(req.PluginName) if err != nil { @@ -109,12 +109,12 @@ func (c *ClientCSI) CSIControllerAttachVolume(req *structs.ClientCSIControllerAt return nil } -func (c *ClientCSI) findControllerPlugin(name string) (csi.CSIPlugin, error) { +func (c *CSIController) findControllerPlugin(name string) (csi.CSIPlugin, error) { return c.findPlugin(dynamicplugins.PluginTypeCSIController, name) } // TODO: Cache Plugin Clients? -func (c *ClientCSI) findPlugin(ptype, name string) (csi.CSIPlugin, error) { +func (c *CSIController) findPlugin(ptype, name string) (csi.CSIPlugin, error) { pIface, err := c.c.dynamicRegistry.DispensePlugin(ptype, name) if err != nil { return nil, err @@ -128,6 +128,6 @@ func (c *ClientCSI) findPlugin(ptype, name string) (csi.CSIPlugin, error) { return plugin, nil } -func (c *ClientCSI) requestContext() (context.Context, context.CancelFunc) { +func (c *CSIController) requestContext() (context.Context, context.CancelFunc) { return context.WithTimeout(context.Background(), CSIPluginRequestTimeout) } diff --git a/client/client_csi_endpoint_test.go b/client/csi_controller_endpoint_test.go similarity index 96% rename from client/client_csi_endpoint_test.go rename to client/csi_controller_endpoint_test.go index 4e5da6bd6..db801b1da 100644 --- a/client/client_csi_endpoint_test.go +++ b/client/csi_controller_endpoint_test.go @@ -18,7 +18,7 @@ var fakePlugin = &dynamicplugins.PluginInfo{ ConnectionInfo: &dynamicplugins.PluginConnectionInfo{}, } -func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { +func TestCSIController_AttachVolume(t *testing.T) { t.Parallel() cases := []struct { @@ -139,7 +139,7 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { require.Nil(err) var resp structs.ClientCSIControllerAttachVolumeResponse - err = client.ClientRPC("ClientCSI.CSIControllerAttachVolume", tc.Request, &resp) + err = client.ClientRPC("CSIController.AttachVolume", tc.Request, &resp) require.Equal(tc.ExpectedErr, err) if tc.ExpectedResponse != nil { require.Equal(tc.ExpectedResponse, &resp) @@ -148,7 +148,7 @@ func TestClientCSI_CSIControllerAttachVolume(t *testing.T) { } } -func TestClientCSI_CSIControllerValidateVolume(t *testing.T) { +func TestCSIController_ValidateVolume(t *testing.T) { t.Parallel() cases := []struct { @@ -228,7 +228,7 @@ func TestClientCSI_CSIControllerValidateVolume(t *testing.T) { require.Nil(err) var resp structs.ClientCSIControllerValidateVolumeResponse - err = client.ClientRPC("ClientCSI.CSIControllerValidateVolume", tc.Request, &resp) + err = client.ClientRPC("CSIController.ValidateVolume", tc.Request, &resp) require.Equal(tc.ExpectedErr, err) if tc.ExpectedResponse != nil { require.Equal(tc.ExpectedResponse, &resp) diff --git a/client/rpc.go b/client/rpc.go index 31589e3e8..04ee73c44 100644 --- a/client/rpc.go +++ b/client/rpc.go @@ -20,11 +20,11 @@ import ( // rpcEndpoints holds the RPC endpoints type rpcEndpoints struct { - ClientStats *ClientStats - 
ClientCSI *ClientCSI - FileSystem *FileSystem - Allocations *Allocations - Agent *Agent + ClientStats *ClientStats + CSIController *CSIController + FileSystem *FileSystem + Allocations *Allocations + Agent *Agent } // ClientRPC is used to make a local, client only RPC call @@ -218,7 +218,7 @@ func (c *Client) streamingRpcConn(server *servers.Server, method string) (net.Co func (c *Client) setupClientRpc() { // Initialize the RPC handlers c.endpoints.ClientStats = &ClientStats{c} - c.endpoints.ClientCSI = &ClientCSI{c} + c.endpoints.CSIController = &CSIController{c} c.endpoints.FileSystem = NewFileSystemEndpoint(c) c.endpoints.Allocations = NewAllocationsEndpoint(c) c.endpoints.Agent = NewAgentEndpoint(c) @@ -236,7 +236,7 @@ func (c *Client) setupClientRpc() { func (c *Client) setupClientRpcServer(server *rpc.Server) { // Register the endpoints server.Register(c.endpoints.ClientStats) - server.Register(c.endpoints.ClientCSI) + server.Register(c.endpoints.CSIController) server.Register(c.endpoints.FileSystem) server.Register(c.endpoints.Allocations) server.Register(c.endpoints.Agent) From e75f057df30c011f5f47d9d95e1bd3607d2bf794 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 21 Feb 2020 11:32:10 +0100 Subject: [PATCH 100/126] csi: Fix Controller RPCs Currently the handling of CSINode RPCs does not correctly handle forwarding RPCs to Nodes. This commit fixes this by introducing a shim RPC (nomad/client_csi_enpdoint) that will correctly forward the request to the owning node, or submit the RPC to the client. In the process it also cleans up handling a little bit by adding the `CSIControllerQuery` embeded struct for required forwarding state. The CSIControllerQuery embeding the requirement of a `PluginID` also means we could move node targetting into the shim RPC if wanted in the future. --- client/csi_controller_endpoint.go | 10 ++- client/csi_controller_endpoint_test.go | 94 ++++++++++++-------- client/structs/csi.go | 34 +++++--- nomad/client_csi_endpoint.go | 75 ++++++++++++++++ nomad/client_csi_endpoint_test.go | 93 ++++++++++++++++++++ nomad/csi_endpoint.go | 114 +++++++++++++++---------- nomad/csi_endpoint_test.go | 9 ++ nomad/server.go | 11 ++- 8 files changed, 345 insertions(+), 95 deletions(-) create mode 100644 nomad/client_csi_endpoint.go create mode 100644 nomad/client_csi_endpoint_test.go diff --git a/client/csi_controller_endpoint.go b/client/csi_controller_endpoint.go index 5aaf66b6e..f3a386ae4 100644 --- a/client/csi_controller_endpoint.go +++ b/client/csi_controller_endpoint.go @@ -70,7 +70,7 @@ func (c *CSIController) ValidateVolume(req *structs.ClientCSIControllerValidateV // In the future this may be expanded to request dynamic secrets for attachement. 
func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) - plugin, err := c.findControllerPlugin(req.PluginName) + plugin, err := c.findControllerPlugin(req.PluginID) if err != nil { return err } @@ -85,8 +85,8 @@ func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolum return errors.New("VolumeID is required") } - if req.NodeID == "" { - return errors.New("NodeID is required") + if req.ClientCSINodeID == "" { + return errors.New("ClientCSINodeID is required") } if !nstructs.ValidCSIVolumeAccessMode(req.AccessMode) { @@ -109,6 +109,10 @@ func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolum return nil } +func (c *CSIController) DetachVolume(req *structs.ClientCSIControllerDetachVolumeRequest, resp *structs.ClientCSIControllerDetachVolumeResponse) error { + return fmt.Errorf("Unimplemented") +} + func (c *CSIController) findControllerPlugin(name string) (csi.CSIPlugin, error) { return c.findPlugin(dynamicplugins.PluginTypeCSIController, name) } diff --git a/client/csi_controller_endpoint_test.go b/client/csi_controller_endpoint_test.go index db801b1da..ca9129fb6 100644 --- a/client/csi_controller_endpoint_test.go +++ b/client/csi_controller_endpoint_test.go @@ -31,43 +31,53 @@ func TestCSIController_AttachVolume(t *testing.T) { { Name: "returns plugin not found errors", Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: "some-garbage", + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, }, ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), }, { Name: "validates volumeid is not empty", Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, }, ExpectedErr: errors.New("VolumeID is required"), }, { Name: "validates nodeid is not empty", Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", }, - ExpectedErr: errors.New("NodeID is required"), + ExpectedErr: errors.New("ClientCSINodeID is required"), }, { Name: "validates AccessMode", Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", - AccessMode: nstructs.CSIVolumeAccessMode("foo"), + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessMode("foo"), }, ExpectedErr: errors.New("Unknown access mode: foo"), }, { Name: "validates attachmentmode is not empty", Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", - AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, - AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, + AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), }, ExpectedErr: errors.New("Unknown 
attachment mode: bar"), }, @@ -77,11 +87,13 @@ func TestCSIController_AttachVolume(t *testing.T) { fc.NextControllerPublishVolumeErr = errors.New("hello") }, Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", - AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, ExpectedErr: errors.New("hello"), }, @@ -91,11 +103,13 @@ func TestCSIController_AttachVolume(t *testing.T) { fc.NextControllerPublishVolumeResponse = &csi.ControllerPublishVolumeResponse{} }, Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", - AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{}, }, @@ -107,11 +121,13 @@ func TestCSIController_AttachVolume(t *testing.T) { } }, Request: &structs.ClientCSIControllerAttachVolumeRequest{ - PluginName: fakePlugin.Name, - VolumeID: "1234-4321-1234-4321", - NodeID: "abcde", - AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, - AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, }, ExpectedResponse: &structs.ClientCSIControllerAttachVolumeResponse{ PublishContext: map[string]string{"foo": "bar"}, @@ -161,14 +177,18 @@ func TestCSIController_ValidateVolume(t *testing.T) { { Name: "validates volumeid is not empty", Request: &structs.ClientCSIControllerValidateVolumeRequest{ - PluginID: fakePlugin.Name, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, }, ExpectedErr: errors.New("VolumeID is required"), }, { Name: "returns plugin not found errors", Request: &structs.ClientCSIControllerValidateVolumeRequest{ - PluginID: "some-garbage", + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, VolumeID: "foo", }, ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), @@ -176,7 +196,9 @@ func TestCSIController_ValidateVolume(t *testing.T) { { Name: "validates attachmentmode", Request: &structs.ClientCSIControllerValidateVolumeRequest{ - PluginID: fakePlugin.Name, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, VolumeID: "1234-4321-1234-4321", AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, @@ -186,7 +208,9 @@ func TestCSIController_ValidateVolume(t *testing.T) { { Name: "validates AccessMode", Request: &structs.ClientCSIControllerValidateVolumeRequest{ - PluginID: fakePlugin.Name, + CSIControllerQuery: 
structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, VolumeID: "1234-4321-1234-4321", AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, AccessMode: nstructs.CSIVolumeAccessMode("foo"), @@ -199,7 +223,9 @@ func TestCSIController_ValidateVolume(t *testing.T) { fc.NextControllerValidateVolumeErr = errors.New("hello") }, Request: &structs.ClientCSIControllerValidateVolumeRequest{ - PluginID: fakePlugin.Name, + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, VolumeID: "1234-4321-1234-4321", AccessMode: nstructs.CSIVolumeAccessModeSingleNodeWriter, AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, diff --git a/client/structs/csi.go b/client/structs/csi.go index 6b9f891be..d0e96a588 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -20,27 +20,36 @@ type CSIVolumeMountOptions struct { MountFlags []string } -type ClientCSIControllerValidateVolumeRequest struct { +// CSIControllerQuery is used to specify various flags for queries against CSI +// Controllers +type CSIControllerQuery struct { + // ControllerNodeID is the node that should be targeted by the request + ControllerNodeID string + + // PluginID is the plugin that should be targeted on the given node. PluginID string +} + +type ClientCSIControllerValidateVolumeRequest struct { VolumeID string AttachmentMode structs.CSIVolumeAttachmentMode AccessMode structs.CSIVolumeAccessMode + + CSIControllerQuery } type ClientCSIControllerValidateVolumeResponse struct { } type ClientCSIControllerAttachVolumeRequest struct { - PluginName string - // The ID of the volume to be used on a node. // This field is REQUIRED. VolumeID string // The ID of the node. This field is REQUIRED. This must match the NodeID that // is fingerprinted by the target node for this plugin name. - NodeID string + ClientCSINodeID string // AttachmentMode indicates how the volume should be attached and mounted into // a task. @@ -56,6 +65,8 @@ type ClientCSIControllerAttachVolumeRequest struct { // ReadOnly indicates that the volume will be used in a readonly fashion. This // only works when the Controller has the PublishReadonly capability. ReadOnly bool + + CSIControllerQuery } func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() *csi.ControllerPublishVolumeRequest { @@ -65,7 +76,7 @@ func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() *csi.ControllerP return &csi.ControllerPublishVolumeRequest{ VolumeID: c.VolumeID, - NodeID: c.NodeID, + NodeID: c.ClientCSINodeID, ReadOnly: c.ReadOnly, } } @@ -88,15 +99,16 @@ type ClientCSIControllerAttachVolumeResponse struct { } type ClientCSIControllerDetachVolumeRequest struct { - PluginName string - // The ID of the volume to be unpublished for the node // This field is REQUIRED. VolumeID string - // The ID of the node. This field is REQUIRED. This must match the NodeID that - // is fingerprinted by the target node for this plugin name. - NodeID string + // The CSI Node ID for the Node that the volume should be detached from. + // This field is REQUIRED. This must match the NodeID that is fingerprinted + // by the target node for this plugin name. 
+ ClientCSINodeID string + + CSIControllerQuery } func (c *ClientCSIControllerDetachVolumeRequest) ToCSIRequest() *csi.ControllerUnpublishVolumeRequest { @@ -106,7 +118,7 @@ func (c *ClientCSIControllerDetachVolumeRequest) ToCSIRequest() *csi.ControllerU return &csi.ControllerUnpublishVolumeRequest{ VolumeID: c.VolumeID, - NodeID: c.NodeID, + NodeID: c.ClientCSINodeID, } } diff --git a/nomad/client_csi_endpoint.go b/nomad/client_csi_endpoint.go new file mode 100644 index 000000000..061e6493d --- /dev/null +++ b/nomad/client_csi_endpoint.go @@ -0,0 +1,75 @@ +package nomad + +import ( + "errors" + "time" + + metrics "github.com/armon/go-metrics" + log "github.com/hashicorp/go-hclog" + cstructs "github.com/hashicorp/nomad/client/structs" +) + +// ClientCSIController is used to forward RPC requests to the targed Nomad client's +// CSIController endpoint. +type ClientCSIController struct { + srv *Server + logger log.Logger +} + +func (a *ClientCSIController) AttachVolume(args *cstructs.ClientCSIControllerAttachVolumeRequest, reply *cstructs.ClientCSIControllerAttachVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "attach_volume"}, time.Now()) + + // Verify the arguments. + if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.AttachVolume", args, reply) + } + + // Make the RPC + return NodeRpc(state.Session, "CSIController.AttachVolume", args, reply) +} + +func (a *ClientCSIController) ValidateVolume(args *cstructs.ClientCSIControllerValidateVolumeRequest, reply *cstructs.ClientCSIControllerValidateVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "validate_volume"}, time.Now()) + + // Verify the arguments. 
+ if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.ValidateVolume", args, reply) + } + + // Make the RPC + return NodeRpc(state.Session, "CSIController.ValidateVolume", args, reply) +} diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go new file mode 100644 index 000000000..48774fc12 --- /dev/null +++ b/nomad/client_csi_endpoint_test.go @@ -0,0 +1,93 @@ +package nomad + +import ( + "testing" + + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" + "github.com/hashicorp/nomad/client" + "github.com/hashicorp/nomad/client/config" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/testutil" + "github.com/stretchr/testify/require" +) + +func TestClientCSIController_AttachVolume_Local(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s, cleanupS := TestServer(t, nil) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s.config.RPCAddr.String()} + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + req := &cstructs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.AttachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} + +func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s1, cleanupS1 := TestServer(t, nil) + defer cleanupS1() + s2, cleanupS2 := TestServer(t, func(c *Config) { + c.DevDisableBootstrap = true + }) + defer cleanupS2() + TestJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + testutil.WaitForLeader(t, s2.RPC) + codec := rpcClient(t, s2) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s2.config.RPCAddr.String()} + c.GCDiskUsageThreshold = 100.0 + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s2.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + // Force remove the connection locally in case it exists + s1.nodeConnsLock.Lock() + delete(s1.nodeConns, c.NodeID()) + s1.nodeConnsLock.Unlock() + + req := &cstructs.ClientCSIControllerAttachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.AttachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + 
require.Contains(err.Error(), "must specify plugin name to dispense") +} diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 4d7b9919a..91aa6a943 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -222,16 +222,26 @@ func (srv *Server) controllerValidateVolume(req *structs.CSIVolumeRegisterReques // The plugin requires a controller. Now we do some validation of the Volume // to ensure that the registered capabilities are valid and that the volume // exists. - method := "ClientCSI.CSIControllerValidateVolume" + + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plugin) + if err != nil || nodeID == "" { + return err + } + + method := "ClientCSIController.ValidateVolume" cReq := &cstructs.ClientCSIControllerValidateVolumeRequest{ - PluginID: plugin.ID, VolumeID: vol.ID, AttachmentMode: vol.AttachmentMode, AccessMode: vol.AccessMode, } + cReq.PluginID = plugin.ID + cReq.ControllerNodeID = nodeID cResp := &cstructs.ClientCSIControllerValidateVolumeResponse{} - return srv.csiControllerRPC(plugin, method, cReq, cResp) + return srv.RPC(method, cReq, cResp) } // Register registers a new volume @@ -483,25 +493,42 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r return nil } - method := "ClientCSI.CSIControllerAttachVolume" + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plug) + if err != nil || nodeID == "" { + return err + } + + targetNode, err := state.NodeByID(ws, alloc.NodeID) + if err != nil { + return err + } + if targetNode == nil { + return fmt.Errorf("%s: %s", structs.ErrUnknownNodePrefix, alloc.NodeID) + } + targetCSIInfo, ok := targetNode.CSINodePlugins[plug.ID] + if !ok { + return fmt.Errorf("Failed to find NodeInfo for node: %s", targetNode.ID) + } + + method := "ClientCSIController.AttachVolume" cReq := &cstructs.ClientCSIControllerAttachVolumeRequest{ - PluginName: plug.ID, - VolumeID: req.VolumeID, - NodeID: alloc.NodeID, - AttachmentMode: vol.AttachmentMode, - AccessMode: vol.AccessMode, - ReadOnly: req.Claim == structs.CSIVolumeClaimRead, + VolumeID: req.VolumeID, + ClientCSINodeID: targetCSIInfo.NodeInfo.ID, + AttachmentMode: vol.AttachmentMode, + AccessMode: vol.AccessMode, + ReadOnly: req.Claim == structs.CSIVolumeClaimRead, // TODO(tgross): we don't have a way of setting these yet. // ref https://github.com/hashicorp/nomad/issues/7007 // MountOptions: vol.MountOptions, } + cReq.PluginID = plug.ID + cReq.ControllerNodeID = nodeID cResp := &cstructs.ClientCSIControllerAttachVolumeResponse{} - // CSI controller plugins can block for arbitrarily long times, - // but we need to make sure it completes before we can safely - // mark the volume as claimed and return to the client so it - // can do a `NodePublish`. - err = srv.csiControllerRPC(plug, method, cReq, cResp) + err = srv.RPC(method, cReq, cResp) if err != nil { return err } @@ -512,24 +539,43 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r // controllerUnpublishVolume sends an unpublish request to the CSI // controller plugin associated with a volume, if any. // TODO: the only caller of this won't have an alloc pointer handy, should it be its own request arg type? 
-func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, nodeID string) error { +func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, targetNomadNodeID string) error { plug, vol, err := srv.volAndPluginLookup(req.VolumeID) if plug == nil || vol == nil || err != nil { return err // possibly nil if no controller required } - method := "ClientCSI.DetachVolume" - cReq := &cstructs.ClientCSIControllerDetachVolumeRequest{ - PluginName: plug.ID, - VolumeID: req.VolumeID, - NodeID: nodeID, - } - err = srv.csiControllerRPC(plug, method, cReq, - &cstructs.ClientCSIControllerDetachVolumeResponse{}) + ws := memdb.NewWatchSet() + state := srv.State() + + targetNode, err := state.NodeByID(ws, targetNomadNodeID) if err != nil { return err } - return nil + if targetNode == nil { + return fmt.Errorf("%s: %s", structs.ErrUnknownNodePrefix, targetNomadNodeID) + } + targetCSIInfo, ok := targetNode.CSINodePlugins[plug.ID] + if !ok { + return fmt.Errorf("Failed to find NodeInfo for node: %s", targetNode.ID) + } + + // plugin IDs are not scoped to region/DC but volumes are. + // so any node we get for a controller is already in the same region/DC + // for the volume. + nodeID, err := srv.nodeForControllerPlugin(plug) + if err != nil || nodeID == "" { + return err + } + + method := "ClientCSIController.DetachVolume" + cReq := &cstructs.ClientCSIControllerDetachVolumeRequest{ + VolumeID: req.VolumeID, + ClientCSINodeID: targetCSIInfo.NodeInfo.ID, + } + cReq.PluginID = plug.ID + cReq.ControllerNodeID = nodeID + return srv.RPC(method, cReq, &cstructs.ClientCSIControllerDetachVolumeResponse{}) } func (srv *Server) volAndPluginLookup(volID string) (*structs.CSIPlugin, *structs.CSIVolume, error) { @@ -560,24 +606,6 @@ func (srv *Server) volAndPluginLookup(volID string) (*structs.CSIPlugin, *struct return plug, vol, nil } -func (srv *Server) csiControllerRPC(plugin *structs.CSIPlugin, method string, args, reply interface{}) error { - // plugin IDs are not scoped to region/DC but volumes are. - // so any node we get for a controller is already in the same region/DC - // for the volume. - nodeID, err := srv.nodeForControllerPlugin(plugin) - if err != nil || nodeID == "" { - return err - } - err = findNodeConnAndForward(srv, nodeID, method, args, reply) - if err != nil { - return err - } - if replyErr, ok := reply.(error); ok { - return replyErr - } - return nil -} - // nodeForControllerPlugin returns the node ID for a random controller // to load-balance long-blocking RPCs across client nodes. 
func (srv *Server) nodeForControllerPlugin(plugin *structs.CSIPlugin) (string, error) { diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 8c1905084..a3320c836 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -336,6 +336,14 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { RequiresControllerPlugin: true, }, } + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": {PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{}, + NodeInfo: &structs.CSINodeInfo{}, + RequiresControllerPlugin: true, + }, + } err := state.UpsertNode(1002, node) require.NoError(t, err) vols := []*structs.CSIVolume{{ @@ -367,6 +375,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { } claimResp := &structs.CSIVolumeClaimResponse{} err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) + // Because the node is not registered require.EqualError(t, err, "No path to node") } diff --git a/nomad/server.go b/nomad/server.go index 41ffe8cbe..921d9901b 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -261,10 +261,11 @@ type endpoints struct { Enterprise *EnterpriseEndpoints // Client endpoints - ClientStats *ClientStats - FileSystem *FileSystem - Agent *Agent - ClientAllocations *ClientAllocations + ClientStats *ClientStats + FileSystem *FileSystem + Agent *Agent + ClientAllocations *ClientAllocations + ClientCSIController *ClientCSIController } // NewServer is used to construct a new Nomad server from the @@ -1110,6 +1111,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.ClientStats = &ClientStats{srv: s, logger: s.logger.Named("client_stats")} s.staticEndpoints.ClientAllocations = &ClientAllocations{srv: s, logger: s.logger.Named("client_allocs")} s.staticEndpoints.ClientAllocations.register() + s.staticEndpoints.ClientCSIController = &ClientCSIController{srv: s, logger: s.logger.Named("client_csi")} // Streaming endpoints s.staticEndpoints.FileSystem = &FileSystem{srv: s, logger: s.logger.Named("client_fs")} @@ -1137,6 +1139,7 @@ func (s *Server) setupRpcServer(server *rpc.Server, ctx *RPCContext) { s.staticEndpoints.Enterprise.Register(server) server.Register(s.staticEndpoints.ClientStats) server.Register(s.staticEndpoints.ClientAllocations) + server.Register(s.staticEndpoints.ClientCSIController) server.Register(s.staticEndpoints.FileSystem) server.Register(s.staticEndpoints.Agent) From 247e86bb351e430ae9c741d96ab87ac7292279d4 Mon Sep 17 00:00:00 2001 From: Danielle Lancashire Date: Fri, 21 Feb 2020 11:55:41 +0100 Subject: [PATCH 101/126] csi: VolumeCapabilities for ControllerPublishVolume This commit introduces support for providing VolumeCapabilities during requests to `ControllerPublishVolumes` as this is a required field. 
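As a rough illustration of the mapping this enables, the sketch below converts Nomad-style attachment and access modes into a CSI-style volume capability. The types and the `volumeCapabilityFromModes` helper are simplified stand-ins (the real conversion is `csi.VolumeCapabilityFromStructs` in `plugins/csi`, wired up in the diff below); only the error strings are taken from the updated tests.

```go
package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins for the nomad/structs volume mode types.
type AttachmentMode string
type AccessMode string

const (
	AttachmentModeFilesystem  AttachmentMode = "file-system"
	AttachmentModeBlockDevice AttachmentMode = "block-device"

	AccessModeSingleNodeWriter AccessMode = "single-node-writer"
	AccessModeMultiNodeReader  AccessMode = "multi-node-reader-only"
)

// VolumeCapability is a reduced sketch of what ControllerPublishVolume now
// carries: an access type (mount vs. block) plus the requested access mode.
type VolumeCapability struct {
	AccessType string
	AccessMode AccessMode
}

// volumeCapabilityFromModes mirrors the intent of the conversion used in this
// patch: unknown modes are rejected here rather than being validated
// separately in the client endpoint.
func volumeCapabilityFromModes(attach AttachmentMode, access AccessMode) (*VolumeCapability, error) {
	var accessType string
	switch attach {
	case AttachmentModeFilesystem:
		accessType = "mount"
	case AttachmentModeBlockDevice:
		accessType = "block"
	default:
		return nil, fmt.Errorf("Unknown volume attachment mode: %v", attach)
	}

	switch access {
	case AccessModeSingleNodeWriter, AccessModeMultiNodeReader:
		return &VolumeCapability{AccessType: accessType, AccessMode: access}, nil
	default:
		return nil, errors.New("Unknown volume access mode: " + string(access))
	}
}

func main() {
	vc, err := volumeCapabilityFromModes(AttachmentModeFilesystem, AccessModeSingleNodeWriter)
	if err != nil {
		panic(err)
	}
	fmt.Printf("capability: %+v\n", vc)
}
```

This is also why the endpoint tests below now expect the "Unknown volume ..." error messages: the explicit mode checks move out of AttachVolume and into the request-to-capability conversion.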
--- client/csi_controller_endpoint.go | 12 ++++-------- client/csi_controller_endpoint_test.go | 5 +++-- client/structs/csi.go | 18 ++++++++++++------ plugins/csi/plugin.go | 16 ++++++++-------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/client/csi_controller_endpoint.go b/client/csi_controller_endpoint.go index f3a386ae4..d724f72bd 100644 --- a/client/csi_controller_endpoint.go +++ b/client/csi_controller_endpoint.go @@ -9,7 +9,6 @@ import ( metrics "github.com/armon/go-metrics" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/client/structs" - nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/csi" ) @@ -89,18 +88,15 @@ func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolum return errors.New("ClientCSINodeID is required") } - if !nstructs.ValidCSIVolumeAccessMode(req.AccessMode) { - return fmt.Errorf("Unknown access mode: %v", req.AccessMode) - } - - if !nstructs.ValidCSIVolumeAttachmentMode(req.AttachmentMode) { - return fmt.Errorf("Unknown attachment mode: %v", req.AttachmentMode) + csiReq, err := req.ToCSIRequest() + if err != nil { + return err } // Submit the request for a volume to the CSI Plugin. ctx, cancelFn := c.requestContext() defer cancelFn() - cresp, err := plugin.ControllerPublishVolume(ctx, req.ToCSIRequest()) + cresp, err := plugin.ControllerPublishVolume(ctx, csiReq) if err != nil { return err } diff --git a/client/csi_controller_endpoint_test.go b/client/csi_controller_endpoint_test.go index ca9129fb6..90795ba0d 100644 --- a/client/csi_controller_endpoint_test.go +++ b/client/csi_controller_endpoint_test.go @@ -64,9 +64,10 @@ func TestCSIController_AttachVolume(t *testing.T) { }, VolumeID: "1234-4321-1234-4321", ClientCSINodeID: "abcde", + AttachmentMode: nstructs.CSIVolumeAttachmentModeFilesystem, AccessMode: nstructs.CSIVolumeAccessMode("foo"), }, - ExpectedErr: errors.New("Unknown access mode: foo"), + ExpectedErr: errors.New("Unknown volume access mode: foo"), }, { Name: "validates attachmentmode is not empty", @@ -79,7 +80,7 @@ func TestCSIController_AttachVolume(t *testing.T) { AccessMode: nstructs.CSIVolumeAccessModeMultiNodeReader, AttachmentMode: nstructs.CSIVolumeAttachmentMode("bar"), }, - ExpectedErr: errors.New("Unknown attachment mode: bar"), + ExpectedErr: errors.New("Unknown volume attachment mode: bar"), }, { Name: "returns transitive errors", diff --git a/client/structs/csi.go b/client/structs/csi.go index d0e96a588..1a5b54286 100644 --- a/client/structs/csi.go +++ b/client/structs/csi.go @@ -69,16 +69,22 @@ type ClientCSIControllerAttachVolumeRequest struct { CSIControllerQuery } -func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() *csi.ControllerPublishVolumeRequest { +func (c *ClientCSIControllerAttachVolumeRequest) ToCSIRequest() (*csi.ControllerPublishVolumeRequest, error) { if c == nil { - return &csi.ControllerPublishVolumeRequest{} + return &csi.ControllerPublishVolumeRequest{}, nil + } + + caps, err := csi.VolumeCapabilityFromStructs(c.AttachmentMode, c.AccessMode) + if err != nil { + return nil, err } return &csi.ControllerPublishVolumeRequest{ - VolumeID: c.VolumeID, - NodeID: c.ClientCSINodeID, - ReadOnly: c.ReadOnly, - } + VolumeID: c.VolumeID, + NodeID: c.ClientCSINodeID, + ReadOnly: c.ReadOnly, + VolumeCapability: caps, + }, nil } type ClientCSIControllerAttachVolumeResponse struct { diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index c38198241..9eac22e25 100644 --- a/plugins/csi/plugin.go +++ 
b/plugins/csi/plugin.go @@ -228,10 +228,10 @@ func NewControllerCapabilitySet(resp *csipbv1.ControllerGetCapabilitiesResponse) } type ControllerPublishVolumeRequest struct { - VolumeID string - NodeID string - ReadOnly bool - //TODO: Add Capabilities + VolumeID string + NodeID string + ReadOnly bool + VolumeCapability *VolumeCapability } func (r *ControllerPublishVolumeRequest) ToCSIRepresentation() *csipbv1.ControllerPublishVolumeRequest { @@ -240,10 +240,10 @@ func (r *ControllerPublishVolumeRequest) ToCSIRepresentation() *csipbv1.Controll } return &csipbv1.ControllerPublishVolumeRequest{ - VolumeId: r.VolumeID, - NodeId: r.NodeID, - Readonly: r.ReadOnly, - // TODO: add capabilities + VolumeId: r.VolumeID, + NodeId: r.NodeID, + Readonly: r.ReadOnly, + VolumeCapability: r.VolumeCapability.ToCSIRepresentation(), } } From a4784ef2582a9ec0a2694ad48ad4e7510477450a Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Fri, 21 Feb 2020 14:48:16 -0500 Subject: [PATCH 102/126] csi add allocation context to fingerprinting results (#7133) * structs: CSIInfo include AllocID, CSIPlugins no Jobs * state_store: eliminate plugin Jobs, delete an empty plugin * nomad/structs/csi: detect empty plugins correctly * client/allocrunner/taskrunner/plugin_supervisor_hook: option AllocID * client/pluginmanager/csimanager/instance: allocID * client/pluginmanager/csimanager/fingerprint: set AllocID * client/node_updater: split controller and node plugins * api/csi: remove Jobs The CSI Plugin API will map plugins to allocations, which allows plugins to be defined by jobs in many configurations. In particular, multiple plugins can be defined in the same job, and multiple jobs can be used to define a single plugin. Because we now map the allocation context directly from the node, it's no longer necessary to track the jobs associated with a plugin directly. 
* nomad/csi_endpoint_test: CreateTestPlugin & register via fingerprint * client/dynamicplugins: lift AllocID into the struct from Options * api/csi_test: remove Jobs test * nomad/structs/csi: CSIPlugins has an array of allocs * nomad/state/state_store: implement CSIPluginDenormalize * nomad/state/state_store: CSIPluginDenormalize npe on missing alloc * nomad/csi_endpoint_test: defer deleteNodes for clarity * api/csi_test: disable this test awaiting mocks: https://github.com/hashicorp/nomad/issues/7123 --- api/csi.go | 18 +-- api/csi_test.go | 89 ++++------- .../taskrunner/plugin_supervisor_hook.go | 1 + client/dynamicplugins/registry.go | 3 + client/node_updater.go | 15 +- .../pluginmanager/csimanager/fingerprint.go | 1 + client/pluginmanager/csimanager/instance.go | 4 + nomad/csi_endpoint_test.go | 119 ++++++++------ nomad/state/state_store.go | 146 +++--------------- nomad/state/state_store_test.go | 47 ------ nomad/structs/csi.go | 67 ++------ nomad/structs/node.go | 1 + 12 files changed, 167 insertions(+), 344 deletions(-) diff --git a/api/csi.go b/api/csi.go index a3f4452fd..ca6474970 100644 --- a/api/csi.go +++ b/api/csi.go @@ -156,24 +156,18 @@ type CSIPlugins struct { } type CSIPlugin struct { - ID string - Type CSIPluginType - Namespace string - Jobs map[string]map[string]*Job - - ControllersHealthy int + ID string + // Map Node.ID to CSIInfo fingerprint results Controllers map[string]*CSIInfo - NodesHealthy int Nodes map[string]*CSIInfo - - CreateIndex uint64 - ModifyIndex uint64 + ControllersHealthy int + NodesHealthy int + CreateIndex uint64 + ModifyIndex uint64 } type CSIPluginListStub struct { ID string - Type CSIPluginType - JobIDs map[string]map[string]struct{} ControllersHealthy int ControllersExpected int NodesHealthy int diff --git a/api/csi_test.go b/api/csi_test.go index 03a64f039..eeea0f918 100644 --- a/api/csi_test.go +++ b/api/csi_test.go @@ -6,6 +6,11 @@ import ( "github.com/stretchr/testify/require" ) +// TestCSIVolumes_CRUD fails because of a combination of removing the job to plugin creation +// pathway and checking for plugin existence (but not yet health) at registration time. +// There are two possible solutions: +// 1. Expose the test server RPC server and force a Node.Update to fingerprint a plugin +// 2. Build and deploy a dummy CSI plugin via a job, and have it really fingerprint func TestCSIVolumes_CRUD(t *testing.T) { t.Parallel() c, s, root := makeACLClient(t, nil, nil) @@ -18,6 +23,9 @@ func TestCSIVolumes_CRUD(t *testing.T) { require.NotEqual(t, 0, qm.LastIndex) require.Equal(t, 0, len(vols)) + // FIXME we're bailing out here until one of the fixes is available + return + // Authorized QueryOpts. 
Use the root token to just bypass ACL details opts := &QueryOptions{ Region: "global", @@ -31,19 +39,30 @@ func TestCSIVolumes_CRUD(t *testing.T) { AuthToken: root.SecretID, } - // Register a plugin job - j := c.Jobs() - job := testJob() - job.Namespace = stringToPtr("default") - job.TaskGroups[0].Tasks[0].CSIPluginConfig = &TaskCSIPluginConfig{ - ID: "foo", - Type: "monolith", - MountDir: "/not-empty", - } - _, _, err = j.Register(job, wpts) + // Create node plugins + nodes, _, err := c.Nodes().List(nil) require.NoError(t, err) + require.Equal(t, 1, len(nodes)) + + nodeStub := nodes[0] + node, _, err := c.Nodes().Info(nodeStub.ID, nil) + require.NoError(t, err) + node.CSINodePlugins = map[string]*CSIInfo{ + "foo": { + PluginID: "foo", + Healthy: true, + RequiresControllerPlugin: false, + RequiresTopologies: false, + NodeInfo: &CSINodeInfo{ + ID: nodeStub.ID, + MaxVolumes: 200, + }, + }, + } // Register a volume + // This id is here as a string to avoid importing helper, which causes the lint + // rule that checks that the api package is isolated to fail id := "DEADBEEF-31B5-8F78-7986-DD404FDA0CD1" _, err = v.Register(&CSIVolume{ ID: id, @@ -80,53 +99,3 @@ func TestCSIVolumes_CRUD(t *testing.T) { vol, qm, err = v.Info(id, opts) require.Error(t, err, "missing") } - -func TestCSIPlugins_viaJob(t *testing.T) { - t.Parallel() - c, s, root := makeACLClient(t, nil, nil) - defer s.Stop() - p := c.CSIPlugins() - - // Successful empty result - plugs, qm, err := p.List(nil) - require.NoError(t, err) - require.NotEqual(t, 0, qm.LastIndex) - require.Equal(t, 0, len(plugs)) - - // Authorized QueryOpts. Use the root token to just bypass ACL details - opts := &QueryOptions{ - Region: "global", - Namespace: "default", - AuthToken: root.SecretID, - } - - wpts := &WriteOptions{ - Region: "global", - Namespace: "default", - AuthToken: root.SecretID, - } - - // Register a plugin job - j := c.Jobs() - job := testJob() - job.Namespace = stringToPtr("default") - job.TaskGroups[0].Tasks[0].CSIPluginConfig = &TaskCSIPluginConfig{ - ID: "foo", - Type: "monolith", - MountDir: "/not-empty", - } - _, _, err = j.Register(job, wpts) - require.NoError(t, err) - - // Successful result with the plugin - plugs, qm, err = p.List(opts) - require.NoError(t, err) - require.NotEqual(t, 0, qm.LastIndex) - require.Equal(t, 1, len(plugs)) - - // Successful info query - plug, qm, err := p.Info("foo", opts) - require.NoError(t, err) - require.NotNil(t, plug.Jobs[*job.Namespace][*job.ID]) - require.Equal(t, *job.ID, *plug.Jobs[*job.Namespace][*job.ID].ID) -} diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index dde88586f..f44ce5bf1 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -264,6 +264,7 @@ func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), err ConnectionInfo: &dynamicplugins.PluginConnectionInfo{ SocketPath: socketPath, }, + AllocID: h.alloc.ID, Options: map[string]string{ "MountPoint": h.mountPoint, "ContainerMountPoint": h.task.CSIPluginConfig.MountDir, diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go index 2bc4c4961..e9025e73a 100644 --- a/client/dynamicplugins/registry.go +++ b/client/dynamicplugins/registry.go @@ -53,6 +53,9 @@ type PluginInfo struct { // may not be exposed in the future. 
ConnectionInfo *PluginConnectionInfo + // AllocID tracks the allocation running the plugin + AllocID string + // Options is used for plugin registrations to pass further metadata along to // other subsystems Options map[string]string diff --git a/client/node_updater.go b/client/node_updater.go index 115150da5..a87714c92 100644 --- a/client/node_updater.go +++ b/client/node_updater.go @@ -346,9 +346,7 @@ func newBatchNodeUpdates( } // updateNodeFromCSI implements csimanager.UpdateNodeCSIInfoFunc and is used in -// the csi manager to send csi fingerprints to the server. Currently it registers -// all plugins as both controller and node plugins. -// TODO: separate node and controller plugin handling. +// the csi manager to send csi fingerprints to the server. func (b *batchNodeUpdates) updateNodeFromCSI(plugin string, info *structs.CSIInfo) { b.csiMu.Lock() defer b.csiMu.Unlock() @@ -357,8 +355,15 @@ func (b *batchNodeUpdates) updateNodeFromCSI(plugin string, info *structs.CSIInf return } - b.csiNodePlugins[plugin] = info - b.csiControllerPlugins[plugin] = info + // Only one of these is expected to be set, but a future implementation that + // explicitly models monolith plugins with a single fingerprinter may set both + if info.ControllerInfo != nil { + b.csiControllerPlugins[plugin] = info + } + + if info.NodeInfo != nil { + b.csiNodePlugins[plugin] = info + } } // batchCSIUpdates sends all of the batched CSI updates by calling f for each diff --git a/client/pluginmanager/csimanager/fingerprint.go b/client/pluginmanager/csimanager/fingerprint.go index b9596b9ce..fa60e87f1 100644 --- a/client/pluginmanager/csimanager/fingerprint.go +++ b/client/pluginmanager/csimanager/fingerprint.go @@ -85,6 +85,7 @@ func (p *pluginFingerprinter) fingerprint(ctx context.Context) *structs.CSIInfo func (p *pluginFingerprinter) buildBasicFingerprint(ctx context.Context) (*structs.CSIInfo, error) { info := &structs.CSIInfo{ PluginID: p.info.Name, + AllocID: p.info.AllocID, Healthy: false, HealthDescription: "initial fingerprint not completed", } diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index eef8a5b89..82ff914c6 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -31,6 +31,9 @@ type instanceManager struct { // `mountPoint` is bound in to. 
containerMountPoint string + // AllocID is the allocation id of the task group running the dynamic plugin + allocID string + fp *pluginFingerprinter volumeManager *volumeManager @@ -57,6 +60,7 @@ func newInstanceManager(logger hclog.Logger, updater UpdateNodeCSIInfoFunc, p *d mountPoint: p.Options["MountPoint"], containerMountPoint: p.Options["ContainerMountPoint"], + allocID: p.AllocID, volumeManagerSetupCh: make(chan struct{}), diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index a3320c836..fedb37801 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" @@ -472,7 +473,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { require.Equal(t, 0, len(resp.Volumes)) } -func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { +func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { t.Parallel() srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 // Prevent automatic dequeue @@ -482,40 +483,16 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { ns := structs.DefaultNamespace - job := mock.Job() - job.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ - ID: "foo", - Type: structs.CSIPluginTypeMonolith, - MountDir: "non-empty", - } + deleteNodes := CreateTestPlugin(srv.fsm.State(), "foo") + defer deleteNodes() state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) srv.config.ACLEnabled = true - policy := mock.NamespacePolicy(ns, "", []string{ - acl.NamespaceCapabilityCSICreateVolume, - acl.NamespaceCapabilitySubmitJob, - }) - validToken := mock.CreatePolicyAndToken(t, state, 1001, acl.NamespaceCapabilityCSICreateVolume, policy) - codec := rpcClient(t, srv) - // Create the register request - req1 := &structs.JobRegisterRequest{ - Job: job, - WriteRequest: structs.WriteRequest{ - Region: "global", - Namespace: ns, - AuthToken: validToken.SecretID, - }, - } - resp1 := &structs.JobRegisterResponse{} - err := msgpackrpc.CallWithCodec(codec, "Job.Register", req1, resp1) - require.NoError(t, err) - require.NotEqual(t, uint64(0), resp1.Index) - // Get the plugin back out - policy = mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) req2 := &structs.CSIPluginGetRequest{ @@ -526,10 +503,8 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { }, } resp2 := &structs.CSIPluginGetResponse{} - err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + err := msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) require.NoError(t, err) - // The job is created with a higher index than the plugin, there's an extra raft write - require.Greater(t, resp1.Index, resp2.Index) // List plugins req3 := &structs.CSIPluginListRequest{ @@ -544,19 +519,7 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { require.Equal(t, 1, len(resp3.Plugins)) // Deregistration works - req4 := &structs.JobDeregisterRequest{ - JobID: job.ID, - Purge: true, - WriteRequest: structs.WriteRequest{ - Region: "global", - Namespace: ns, - AuthToken: validToken.SecretID, - }, - } - resp4 := 
&structs.JobDeregisterResponse{} - err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req4, resp4) - require.NoError(t, err) - require.Less(t, resp2.Index, resp4.Index) + deleteNodes() // Plugin is missing err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) @@ -564,6 +527,74 @@ func TestCSIPluginEndpoint_RegisterViaJob(t *testing.T) { require.Nil(t, resp2.Plugin) } +// CreateTestPlugin is a helper that generates the node + fingerprint results necessary to +// create a CSIPlugin by directly inserting into the state store. It's exported for use in +// other test packages +func CreateTestPlugin(s *state.StateStore, id string) func() { + // Create some nodes + ns := make([]*structs.Node, 3) + for i := range ns { + n := mock.Node() + ns[i] = n + } + + // Install healthy plugin fingerprinting results + ns[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: uuid.Generate(), + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsAttachDetach: true, + SupportsListVolumes: true, + SupportsListVolumesAttachedNodes: false, + }, + }, + } + + // Install healthy plugin fingerprinting results + allocID := uuid.Generate() + for _, n := range ns[1:] { + n.CSINodePlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: allocID, + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: n.ID, + MaxVolumes: 64, + RequiresNodeStageVolume: true, + }, + }, + } + } + + // Insert them into the state store + index := uint64(999) + for _, n := range ns { + index++ + s.UpsertNode(index, n) + } + + // Return cleanup function that deletes the nodes + return func() { + ids := make([]string, len(ns)) + for i, n := range ns { + ids[i] = n.ID + } + + index++ + s.DeleteNode(index, ids) + } +} + func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { srv, shutdown := TestServer(t, func(c *Config) {}) defer shutdown() diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 4980382a6..c85c3cc90 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1020,15 +1020,15 @@ func deleteNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro plug.ModifyIndex = index if plug.IsEmpty() { - err := txn.Delete("csi_plugins", plug) + err = txn.Delete("csi_plugins", plug) if err != nil { return fmt.Errorf("csi_plugins delete error: %v", err) } - } - - err = txn.Insert("csi_plugins", plug) - if err != nil { - return fmt.Errorf("csi_plugins update error %s: %v", id, err) + } else { + err = txn.Insert("csi_plugins", plug) + if err != nil { + return fmt.Errorf("csi_plugins update error %s: %v", id, err) + } } } @@ -1176,10 +1176,6 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b return fmt.Errorf("unable to upsert job into job_version table: %v", err) } - if err := s.upsertJobCSIPlugins(index, job, txn); err != nil { - return fmt.Errorf("unable to upsert csi_plugins table: %v", err) - } - // Insert the job if err := txn.Insert("jobs", job); err != nil { return fmt.Errorf("job insert failed: %v", err) @@ -1274,11 +1270,6 @@ func (s *StateStore) DeleteJobTxn(index uint64, namespace, jobID string, txn Txn return err } - // Delete the csi_plugins - if err := s.deleteJobCSIPlugins(index, job, txn); err != nil { - return err - } - // Delete the job summary 
if _, err = txn.DeleteAll("job_summary", "id", namespace, jobID); err != nil { return fmt.Errorf("deleing job summary failed: %v", err) @@ -1772,105 +1763,6 @@ func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { return nil } -// upsertJobCSIPlugins is called on UpsertJob and maintains the csi_plugin index of jobs -func (s *StateStore) upsertJobCSIPlugins(index uint64, job *structs.Job, txn *memdb.Txn) error { - ws := memdb.NewWatchSet() - plugs, err := s.csiPluginsByJob(ws, job, index) - if err != nil { - return fmt.Errorf("%v", err) - } - - // Append this job to all of them - for _, plug := range plugs { - if plug.CreateIndex != index { - plug = plug.Copy() - } - - plug.AddJob(job) - plug.ModifyIndex = index - err := txn.Insert("csi_plugins", plug) - if err != nil { - return err - } - } - - if err = txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { - return fmt.Errorf("index update failed: %v", err) - } - - return nil -} - -// csiPluginsByJob finds or creates CSIPlugins identified by the configuration contained in job -func (s *StateStore) csiPluginsByJob(ws memdb.WatchSet, job *structs.Job, index uint64) (map[string]*structs.CSIPlugin, error) { - txn := s.db.Txn(false) - defer txn.Abort() - - plugs := map[string]*structs.CSIPlugin{} - - for _, tg := range job.TaskGroups { - for _, t := range tg.Tasks { - if t.CSIPluginConfig == nil { - continue - } - - plug, ok := plugs[t.CSIPluginConfig.ID] - if ok { - continue - } - - plug, err := s.CSIPluginByID(ws, t.CSIPluginConfig.ID) - if err != nil { - return nil, err - } - - if plug == nil { - plug = structs.NewCSIPlugin(t.CSIPluginConfig.ID, index) - plug.Type = t.CSIPluginConfig.Type - } - - plugs[t.CSIPluginConfig.ID] = plug - } - } - - return plugs, nil -} - -// deleteJobCSIPlugins is called on DeleteJob -func (s *StateStore) deleteJobCSIPlugins(index uint64, job *structs.Job, txn *memdb.Txn) error { - ws := memdb.NewWatchSet() - plugs, err := s.csiPluginsByJob(ws, job, index) - if err != nil { - return fmt.Errorf("%v", err) - } - - // Remove this job from each plugin. 
If the plugin has no jobs left, remove it - for _, plug := range plugs { - if plug.CreateIndex != index { - plug = plug.Copy() - } - - plug.DeleteJob(job) - plug.ModifyIndex = index - - if plug.IsEmpty() { - err = txn.Delete("csi_plugins", plug) - } else { - plug.ModifyIndex = index - err = txn.Insert("csi_plugins", plug) - } - if err != nil { - return fmt.Errorf("csi_plugins update: %v", err) - } - } - - if err = txn.Insert("index", &IndexEntry{"csi_plugins", index}); err != nil { - return fmt.Errorf("index update failed: %v", err) - } - - return nil -} - // CSIVolumeDenormalizePlugins returns a CSIVolume with current health and plugins, but // without allocations // Use this for current volume metadata, handling lists of volumes @@ -1964,20 +1856,30 @@ func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPl return plug, nil } -// CSIPluginDenormalize returns a CSIPlugin with jobs +// CSIPluginDenormalize returns a CSIPlugin with allocation details func (s *StateStore) CSIPluginDenormalize(ws memdb.WatchSet, plug *structs.CSIPlugin) (*structs.CSIPlugin, error) { if plug == nil { return nil, nil } - for ns, js := range plug.Jobs { - for id := range js { - j, err := s.JobByID(ws, ns, id) - if err != nil { - return nil, err - } - plug.Jobs[ns][id] = j + // Get the unique list of allocation ids + ids := map[string]struct{}{} + for _, info := range plug.Controllers { + ids[info.AllocID] = struct{}{} + } + for _, info := range plug.Nodes { + ids[info.AllocID] = struct{}{} + } + + for id := range ids { + alloc, err := s.AllocByID(ws, id) + if err != nil { + return nil, err } + if alloc == nil { + continue + } + plug.Allocations = append(plug.Allocations, alloc.Stub()) } return plug, nil diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index c965edf45..5b8cccf66 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2922,56 +2922,10 @@ func TestStateStore_CSIVolume(t *testing.T) { require.True(t, vs[0].CanReadOnly()) } -// TestStateStore_CSIPluginJobs creates plugin jobs and tests that they create a CSIPlugin -func TestStateStore_CSIPluginJobs(t *testing.T) { - index := uint64(999) - state := testStateStore(t) - testStateStore_CSIPluginJobs(t, index, state) -} - -func testStateStore_CSIPluginJobs(t *testing.T, index uint64, state *StateStore) (uint64, *StateStore) { - j0 := mock.Job() - j0.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ - ID: "foo", - Type: structs.CSIPluginTypeController, - } - - j1 := mock.Job() - j1.Type = structs.JobTypeSystem - j1.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{ - ID: "foo", - Type: structs.CSIPluginTypeNode, - } - - index++ - err := state.UpsertJob(index, j0) - require.NoError(t, err) - - index++ - err = state.UpsertJob(index, j1) - require.NoError(t, err) - - // Get the plugin back out by id - ws := memdb.NewWatchSet() - plug, err := state.CSIPluginByID(ws, "foo") - require.NoError(t, err) - - require.Equal(t, "foo", plug.ID) - - jids := map[string]struct{}{j0.ID: struct{}{}, j1.ID: struct{}{}} - for jid := range plug.Jobs[structs.DefaultNamespace] { - delete(jids, jid) - } - require.Equal(t, 0, len(jids)) - - return index, state -} - // TestStateStore_CSIPluginNodes uses the state from jobs, and uses node fingerprinting to update health func TestStateStore_CSIPluginNodes(t *testing.T) { index := uint64(999) state := testStateStore(t) - index, state = testStateStore_CSIPluginJobs(t, index, state) 
testStateStore_CSIPluginNodes(t, index, state) } @@ -3040,7 +2994,6 @@ func TestStateStore_CSIPluginBackwards(t *testing.T) { index := uint64(999) state := testStateStore(t) index, state = testStateStore_CSIPluginNodes(t, index, state) - testStateStore_CSIPluginJobs(t, index, state) } func TestStateStore_Indexes(t *testing.T) { diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index cc0bbaee7..5526c04d8 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -457,20 +457,22 @@ type CSIVolumeGetResponse struct { QueryMeta } -// CSIPlugin bundles job and info context for the plugin for clients +// CSIPlugin collects fingerprint info context for the plugin for clients type CSIPlugin struct { - ID string - Type CSIPluginType - - // Jobs is updated by UpsertJob, and keeps an index of jobs containing node or - // controller tasks for this plugin. It is addressed by [job.Namespace][job.ID] - Jobs map[string]map[string]*Job - + ID string ControllerRequired bool + + // Map Node.IDs to fingerprint results, split by type. Monolith type plugins have + // both sets of fingerprinting results. + Controllers map[string]*CSIInfo + Nodes map[string]*CSIInfo + + // Allocations are populated by denormalize to show running allocations + Allocations []*AllocListStub + + // Cache the count of healthy plugins ControllersHealthy int - Controllers map[string]*CSIInfo // map of client IDs to CSI Controllers NodesHealthy int - Nodes map[string]*CSIInfo // map of client IDs to CSI Nodes CreateIndex uint64 ModifyIndex uint64 @@ -489,7 +491,6 @@ func NewCSIPlugin(id string, index uint64) *CSIPlugin { } func (p *CSIPlugin) newStructs() { - p.Jobs = map[string]map[string]*Job{} p.Controllers = map[string]*CSIInfo{} p.Nodes = map[string]*CSIInfo{} } @@ -499,14 +500,6 @@ func (p *CSIPlugin) Copy() *CSIPlugin { out := © out.newStructs() - for ns, js := range p.Jobs { - out.Jobs[ns] = map[string]*Job{} - - for jid, j := range js { - out.Jobs[ns][jid] = j - } - } - for k, v := range p.Controllers { out.Controllers[k] = v } @@ -518,18 +511,6 @@ func (p *CSIPlugin) Copy() *CSIPlugin { return out } -// AddJob adds a job entry to the plugin -func (p *CSIPlugin) AddJob(job *Job) { - if _, ok := p.Jobs[job.Namespace]; !ok { - p.Jobs[job.Namespace] = map[string]*Job{} - } - p.Jobs[job.Namespace][job.ID] = nil -} - -func (p *CSIPlugin) DeleteJob(job *Job) { - delete(p.Jobs[job.Namespace], job.ID) -} - // AddPlugin adds a single plugin running on the node. 
Called from state.NodeUpdate in a // transaction func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo) { @@ -574,8 +555,6 @@ func (p *CSIPlugin) DeleteNode(nodeID string) { type CSIPluginListStub struct { ID string - Type CSIPluginType - JobIDs map[string]map[string]struct{} ControllersHealthy int ControllersExpected int NodesHealthy int @@ -585,18 +564,8 @@ type CSIPluginListStub struct { } func (p *CSIPlugin) Stub() *CSIPluginListStub { - ids := map[string]map[string]struct{}{} - for ns, js := range p.Jobs { - ids[ns] = map[string]struct{}{} - for id := range js { - ids[ns][id] = struct{}{} - } - } - return &CSIPluginListStub{ ID: p.ID, - Type: p.Type, - JobIDs: ids, ControllersHealthy: p.ControllersHealthy, ControllersExpected: len(p.Controllers), NodesHealthy: p.NodesHealthy, @@ -607,17 +576,7 @@ func (p *CSIPlugin) Stub() *CSIPluginListStub { } func (p *CSIPlugin) IsEmpty() bool { - if !(len(p.Controllers) == 0 && len(p.Nodes) == 0) { - return false - } - - empty := true - for _, m := range p.Jobs { - if len(m) > 0 { - empty = false - } - } - return empty + return len(p.Controllers) == 0 && len(p.Nodes) == 0 } type CSIPluginListRequest struct { diff --git a/nomad/structs/node.go b/nomad/structs/node.go index 69d3f0c29..d9d0062fc 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -146,6 +146,7 @@ func (c *CSIControllerInfo) Copy() *CSIControllerInfo { // as plugin health changes on the node. type CSIInfo struct { PluginID string + AllocID string Healthy bool HealthDescription string UpdateTime time.Time From 369b0e54b9a60d203d39b5ebf77af8957cc23853 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 3 Mar 2020 10:59:58 -0500 Subject: [PATCH 103/126] csi: volumes use `Schedulable` rather than `Healthy` (#7250) * structs: add ControllerRequired, volume.Name, no plug.Type * structs: Healthy -> Schedulable * state_store: Healthy -> Schedulable * api: add ControllerRequired to api data types * api: copy csi structs changes * nomad/structs/csi: include name and external id * api/csi: include Name and ExternalID * nomad/structs/csi: comments for the 3 ids --- api/csi.go | 31 ++++++++++++++++--------------- nomad/state/state_store.go | 6 +++--- nomad/state/state_store_test.go | 4 ++-- nomad/structs/csi.go | 28 ++++++++++++++++++---------- nomad/structs/csi_test.go | 2 +- 5 files changed, 40 insertions(+), 31 deletions(-) diff --git a/api/csi.go b/api/csi.go index ca6474970..556867d4e 100644 --- a/api/csi.go +++ b/api/csi.go @@ -81,6 +81,8 @@ const ( type CSIVolume struct { ID string Namespace string + Name string + ExternalID string Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode @@ -88,11 +90,9 @@ type CSIVolume struct { // Combine structs.{Read,Write,Past}Allocs Allocations []*AllocationListStub - // Healthy is true iff all the denormalized plugin health fields are true, and the - // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time + Schedulable bool PluginID string + ControllerRequired bool ControllersHealthy int ControllersExpected int NodesHealthy int @@ -119,17 +119,16 @@ func (v CSIVolumeIndexSort) Swap(i, j int) { // CSIVolumeListStub omits allocations. 
See also nomad/structs/csi.go type CSIVolumeListStub struct { - ID string - Namespace string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode - - // Healthy is true iff all the denormalized plugin health fields are true, and the - // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time + ID string + Namespace string + Name string + ExternalID string + Topologies []*CSITopology + AccessMode CSIVolumeAccessMode + AttachmentMode CSIVolumeAttachmentMode + Schedulable bool PluginID string + ControllerRequired bool ControllersHealthy int ControllersExpected int NodesHealthy int @@ -156,7 +155,8 @@ type CSIPlugins struct { } type CSIPlugin struct { - ID string + ID string + ControllerRequired bool // Map Node.ID to CSIInfo fingerprint results Controllers map[string]*CSIInfo Nodes map[string]*CSIInfo @@ -168,6 +168,7 @@ type CSIPlugin struct { type CSIPluginListStub struct { ID string + ControllerRequired bool ControllersHealthy int ControllersExpected int NodesHealthy int diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index c85c3cc90..2b1567d7a 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1783,7 +1783,7 @@ func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs if plug == nil { vol.ControllersHealthy = 0 vol.NodesHealthy = 0 - vol.Healthy = false + vol.Schedulable = false return vol, nil } @@ -1795,9 +1795,9 @@ func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs vol.ControllersExpected = len(plug.Controllers) vol.NodesExpected = len(plug.Nodes) - vol.Healthy = vol.NodesHealthy > 0 + vol.Schedulable = vol.NodesHealthy > 0 if vol.ControllerRequired { - vol.Healthy = vol.ControllersHealthy > 0 && vol.Healthy + vol.Schedulable = vol.ControllersHealthy > 0 && vol.Schedulable } return vol, nil diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 5b8cccf66..5af1ad81b 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2837,7 +2837,7 @@ func TestStateStore_CSIVolume(t *testing.T) { v0.ID = id0 v0.Namespace = "default" v0.PluginID = "minnie" - v0.Healthy = true + v0.Schedulable = true v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem @@ -2846,7 +2846,7 @@ func TestStateStore_CSIVolume(t *testing.T) { v1.ID = id1 v1.Namespace = "default" v1.PluginID = "adam" - v1.Healthy = true + v1.Schedulable = true v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 5526c04d8..77709b362 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -137,7 +137,12 @@ func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { // CSIVolume is the full representation of a CSI Volume type CSIVolume struct { - ID string + // ID is a namespace unique URL safe identifier for the volume + ID string + // Name is a display name for the volume, not required to be unique + Name string + // ExternalID identifies the volume for the CSI interface, may be URL unsafe + ExternalID string Namespace string Topologies []*CSITopology AccessMode CSIVolumeAccessMode @@ -147,10 +152,9 @@ type CSIVolume struct { ReadAllocs map[string]*Allocation WriteAllocs map[string]*Allocation - // Healthy is true if all the denormalized plugin health fields are true, and the + 
// Schedulable is true if all the denormalized plugin health fields are true, and the // volume has not been marked for garbage collection - Healthy bool - VolumeGC time.Time + Schedulable bool PluginID string ControllerRequired bool ControllersHealthy int @@ -167,13 +171,14 @@ type CSIVolume struct { type CSIVolListStub struct { ID string Namespace string + Name string + ExternalID string Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode CurrentReaders int CurrentWriters int - Healthy bool - VolumeGC time.Time + Schedulable bool PluginID string ControllersHealthy int ControllersExpected int @@ -208,13 +213,14 @@ func (v *CSIVolume) Stub() *CSIVolListStub { stub := CSIVolListStub{ ID: v.ID, Namespace: v.Namespace, + Name: v.Name, + ExternalID: v.ExternalID, Topologies: v.Topologies, AccessMode: v.AccessMode, AttachmentMode: v.AttachmentMode, CurrentReaders: len(v.ReadAllocs), CurrentWriters: len(v.WriteAllocs), - Healthy: v.Healthy, - VolumeGC: v.VolumeGC, + Schedulable: v.Schedulable, PluginID: v.PluginID, ControllersHealthy: v.ControllersHealthy, NodesHealthy: v.NodesHealthy, @@ -227,7 +233,7 @@ func (v *CSIVolume) Stub() *CSIVolListStub { } func (v *CSIVolume) CanReadOnly() bool { - if !v.Healthy { + if !v.Schedulable { return false } @@ -235,7 +241,7 @@ func (v *CSIVolume) CanReadOnly() bool { } func (v *CSIVolume) CanWrite() bool { - if !v.Healthy { + if !v.Schedulable { return false } @@ -555,6 +561,7 @@ func (p *CSIPlugin) DeleteNode(nodeID string) { type CSIPluginListStub struct { ID string + ControllerRequired bool ControllersHealthy int ControllersExpected int NodesHealthy int @@ -566,6 +573,7 @@ type CSIPluginListStub struct { func (p *CSIPlugin) Stub() *CSIPluginListStub { return &CSIPluginListStub{ ID: p.ID, + ControllerRequired: p.ControllerRequired, ControllersHealthy: p.ControllersHealthy, ControllersExpected: len(p.Controllers), NodesHealthy: p.NodesHealthy, diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 7685b41b4..83f2ba2c9 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -9,7 +9,7 @@ import ( func TestCSIVolumeClaim(t *testing.T) { vol := NewCSIVolume("", 0) vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter - vol.Healthy = true + vol.Schedulable = true alloc := &Allocation{ID: "al"} From b3bf64485e0159d22152fb4f3da3d242c46eb7d1 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 4 Mar 2020 11:09:29 -0500 Subject: [PATCH 104/126] csi: remove DevDisableBootstrap flag from tests (#7267) In #7252 we removed the `DevDisableBootstrap` flag to require tests to honor only `BootstrapExpect`, in order to reduce a source of test flakiness. This changeset applies the same fix to the CSI tests. 
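For reference, the two-server test setup now follows roughly this pattern (a sketch of the excerpt in the diff below, not a complete test): both servers declare the expected cluster size via BootstrapExpect instead of one of them disabling bootstrap.

    // Both servers expect a two-node cluster; neither disables bootstrap.
    s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 })
    defer cleanupS1()
    s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 })
    defer cleanupS2()
    TestJoin(t, s1, s2)
    testutil.WaitForLeader(t, s1.RPC)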
--- nomad/client_csi_endpoint_test.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 48774fc12..4be3ce99d 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -51,11 +51,9 @@ func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { require := require.New(t) // Start a server and client - s1, cleanupS1 := TestServer(t, nil) + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) defer cleanupS1() - s2, cleanupS2 := TestServer(t, func(c *Config) { - c.DevDisableBootstrap = true - }) + s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) defer cleanupS2() TestJoin(t, s1, s2) testutil.WaitForLeader(t, s1.RPC) From 016281135cbc8def2911382131206d69bab6e86c Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 6 Mar 2020 09:44:43 -0500 Subject: [PATCH 105/126] storage: add volumes to 'nomad alloc status' CLI (#7256) Adds a stanza for both Host Volumes and CSI Volumes to the the CLI output for `nomad alloc status`. Mostly relies on information already in the API structs, but in the case where there are CSI Volumes we need to make extra API calls to get the volume status. To reduce overhead, these extra calls are hidden behind the `-verbose` flag. --- command/alloc_status.go | 82 +++++++++++++++++++- command/alloc_status_test.go | 146 +++++++++++++++++++++++++++++++++++ 2 files changed, 226 insertions(+), 2 deletions(-) diff --git a/command/alloc_status.go b/command/alloc_status.go index d9eb416aa..b8d860257 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/api/contexts" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/restarts" + "github.com/hashicorp/nomad/nomad/structs" "github.com/posener/complete" ) @@ -214,7 +215,7 @@ func (c *AllocStatusCommand) Run(args []string) int { c.Ui.Output("Omitting resource statistics since the node is down.") } } - c.outputTaskDetails(alloc, stats, displayStats) + c.outputTaskDetails(alloc, stats, displayStats, verbose) } // Format the detailed status @@ -362,12 +363,13 @@ func futureEvalTimePretty(evalID string, client *api.Client) string { // outputTaskDetails prints task details for each task in the allocation, // optionally printing verbose statistics if displayStats is set -func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool) { +func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool, verbose bool) { for task := range c.sortedTaskStateIterator(alloc.TaskStates) { state := alloc.TaskStates[task] c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q is %q[reset]", task, state.State))) c.outputTaskResources(alloc, task, stats, displayStats) c.Ui.Output("") + c.outputTaskVolumes(alloc, task, verbose) c.outputTaskStatus(state) } } @@ -721,3 +723,79 @@ func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState close(output) return output } + +func (c *AllocStatusCommand) outputTaskVolumes(alloc *api.Allocation, taskName string, verbose bool) { + var task *api.Task + var tg *api.TaskGroup +FOUND: + for _, tg = range alloc.Job.TaskGroups { + for _, task = range tg.Tasks { + if task.Name == taskName { + break FOUND + } + } + } + if task == nil || tg == nil { + c.Ui.Error(fmt.Sprintf("Could not find task data for %q", 
taskName)) + return + } + if len(task.VolumeMounts) == 0 { + return + } + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return + } + + var hostVolumesOutput []string + var csiVolumesOutput []string + hostVolumesOutput = append(hostVolumesOutput, "ID|Read Only") + if verbose { + csiVolumesOutput = append(csiVolumesOutput, + "ID|Plugin|Provider|Schedulable|Read Only|Mount Options") + } else { + csiVolumesOutput = append(csiVolumesOutput, "ID|Read Only") + } + + for _, volMount := range task.VolumeMounts { + volReq := tg.Volumes[*volMount.Volume] + switch volReq.Type { + case structs.VolumeTypeHost: + hostVolumesOutput = append(hostVolumesOutput, + fmt.Sprintf("%s|%v", volReq.Name, *volMount.ReadOnly)) + case structs.VolumeTypeCSI: + if verbose { + // there's an extra API call per volume here so we toggle it + // off with the -verbose flag + vol, _, err := client.CSIVolumes().Info(volReq.Name, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error retrieving volume info for %q: %s", + volReq.Name, err)) + continue + } + csiVolumesOutput = append(csiVolumesOutput, + fmt.Sprintf("%s|%s|%s|%v|%v|%s", + volReq.Name, vol.PluginID, + "n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7248 + vol.Schedulable, + volReq.ReadOnly, + "n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7007 + )) + } else { + csiVolumesOutput = append(csiVolumesOutput, + fmt.Sprintf("%s|%v", volReq.Name, volReq.ReadOnly)) + } + } + } + if len(hostVolumesOutput) > 1 { + c.Ui.Output("Host Volumes:") + c.Ui.Output(formatList(hostVolumesOutput)) + c.Ui.Output("") // line padding to next stanza + } + if len(csiVolumesOutput) > 1 { + c.Ui.Output("CSI Volumes:") + c.Ui.Output(formatList(csiVolumesOutput)) + c.Ui.Output("") // line padding to next stanza + } +} diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index 0c0bf38e9..7ae840472 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -2,11 +2,14 @@ package command import ( "fmt" + "io/ioutil" + "os" "regexp" "strings" "testing" "time" + "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" @@ -315,3 +318,146 @@ func TestAllocStatusCommand_AutocompleteArgs(t *testing.T) { assert.Equal(1, len(res)) assert.Equal(a.ID, res[0]) } + +func TestAllocStatusCommand_HostVolumes(t *testing.T) { + t.Parallel() + // We have to create a tempdir for the host volume even though we're + // not going to use it b/c the server validates the config on startup + tmpDir, err := ioutil.TempDir("", "vol0") + if err != nil { + t.Fatalf("unable to create tempdir for test: %v", err) + } + defer os.RemoveAll(tmpDir) + + vol0 := uuid.Generate() + srv, _, url := testServer(t, true, func(c *agent.Config) { + c.Client.HostVolumes = []*structs.ClientHostVolumeConfig{ + { + Name: vol0, + Path: tmpDir, + ReadOnly: false, + }, + } + }) + defer srv.Shutdown() + state := srv.Agent.Server().State() + + // Upsert the job and alloc + node := mock.Node() + alloc := mock.Alloc() + alloc.Metrics = &structs.AllocMetric{} + alloc.NodeID = node.ID + job := alloc.Job + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + vol0: { + Name: vol0, + Type: structs.VolumeTypeHost, + Source: tmpDir, + }, + } + job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ + { + Volume: vol0, + Destination: "/var/www", + ReadOnly: true, + PropagationMode: 
"private", + }, + } + // fakes the placement enough so that we have something to iterate + // on in 'nomad alloc status' + alloc.TaskStates = map[string]*structs.TaskState{ + "web": &structs.TaskState{ + Events: []*structs.TaskEvent{ + structs.NewTaskEvent("test event").SetMessage("test msg"), + }, + }, + } + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + ui := new(cli.MockUi) + cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} + if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } + out := ui.OutputWriter.String() + require.Contains(t, out, "Host Volumes") + require.Contains(t, out, fmt.Sprintf("%s true", vol0)) + require.NotContains(t, out, "CSI Volumes") +} + +func TestAllocStatusCommand_CSIVolumes(t *testing.T) { + t.Parallel() + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + state := srv.Agent.Server().State() + + // Upsert the node, plugin, and volume + vol0 := uuid.Generate() + node := mock.Node() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + err := state.UpsertNode(1001, node) + require.NoError(t, err) + + vols := []*structs.CSIVolume{{ + ID: vol0, + Namespace: "notTheNamespace", + PluginID: "minnie", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + Topologies: []*structs.CSITopology{{ + Segments: map[string]string{"foo": "bar"}, + }}, + }} + err = state.CSIVolumeRegister(1002, vols) + require.NoError(t, err) + + // Upsert the job and alloc + alloc := mock.Alloc() + alloc.Metrics = &structs.AllocMetric{} + alloc.NodeID = node.ID + job := alloc.Job + job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + vol0: { + Name: vol0, + Type: structs.VolumeTypeCSI, + Source: "/tmp/vol0", + }, + } + job.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ + { + Volume: vol0, + Destination: "/var/www", + ReadOnly: true, + PropagationMode: "private", + }, + } + // if we don't set a task state, there's nothing to iterate on alloc status + alloc.TaskStates = map[string]*structs.TaskState{ + "web": &structs.TaskState{ + Events: []*structs.TaskEvent{ + structs.NewTaskEvent("test event").SetMessage("test msg"), + }, + }, + } + summary := mock.JobSummary(alloc.JobID) + require.NoError(t, state.UpsertJobSummary(1004, summary)) + require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc})) + + ui := new(cli.MockUi) + cmd := &AllocStatusCommand{Meta: Meta{Ui: ui}} + if code := cmd.Run([]string{"-address=" + url, "-verbose", alloc.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } + out := ui.OutputWriter.String() + require.Contains(t, out, "CSI Volumes") + require.Contains(t, out, fmt.Sprintf("%s minnie", vol0)) + require.NotContains(t, out, "Host Volumes") +} From 887e1f28c931f1080c63fd28c309d9492aa2bbf0 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Fri, 6 Mar 2020 10:09:10 -0500 Subject: [PATCH 106/126] csi: CLI for volume status, registration/deregistration and plugin status (#7193) * command/csi: csi, csi_plugin, csi_volume * helper/funcs: move ExtraKeys from parse_config to UnusedKeys * command/agent/config_parse: use helper.UnusedKeys * api/csi: annotate CSIVolumes with hcl fields * command/csi_plugin: add Synopsis * 
command/csi_volume_register: use hcl.Decode style parsing * command/csi_volume_list * command/csi_volume_status: list format, cleanup * command/csi_plugin_list * command/csi_plugin_status * command/csi_volume_deregister * command/csi_volume: add Synopsis * api/contexts/contexts: add csi search contexts to the constants * command/commands: register csi commands * api/csi: fix struct tag for linter * command/csi_plugin_list: unused struct vars * command/csi_plugin_status: unused struct vars * command/csi_volume_list: unused struct vars * api/csi: add allocs to CSIPlugin * command/csi_plugin_status: format the allocs * api/allocations: copy Allocation.Stub in from structs * nomad/client_rpc: add some error context with Errorf * api/csi: collapse read & write alloc maps to a stub list * command/csi_volume_status: cleanup allocation display * command/csi_volume_list: use Schedulable instead of Healthy * command/csi_volume_status: use Schedulable instead of Healthy * command/csi_volume_list: sprintf string * command/csi: delete csi.go, csi_plugin.go * command/plugin: refactor csi components to sub-command plugin status * command/plugin: remove csi * command/plugin_status: remove csi * command/volume: remove csi * command/volume_status: split out csi specific * helper/funcs: add RemoveEqualFold * command/agent/config_parse: use helper.RemoveEqualFold * api/csi: do ,unusedKeys right * command/volume: refactor csi components to `nomad volume` * command/volume_register: split out csi specific * command/commands: use the new top level commands * command/volume_deregister: hardwired type csi for now * command/volume_status: csiFormatVolumes rescued from volume_list * command/plugin_status: avoid a panic on no args * command/volume_status: avoid a panic on no args * command/plugin_status: predictVolumeType * command/volume_status: predictVolumeType * nomad/csi_endpoint_test: move CreateTestPlugin to testing * command/plugin_status_test: use CreateTestCSIPlugin * nomad/structs/structs: add CSIPlugins and CSIVolumes search consts * nomad/state/state_store: add CSIPlugins and CSIVolumesByIDPrefix * nomad/search_endpoint: add CSIPlugins and CSIVolumes * command/plugin_status: move the header to the csi specific * command/volume_status: move the header to the csi specific * nomad/state/state_store: CSIPluginByID prefix * command/status: rename the search context to just Plugins/Volumes * command/plugin,volume_status: test return ids now * command/status: rename the search context to just Plugins/Volumes * command/plugin_status: support -json and -t * command/volume_status: support -json and -t * command/plugin_status_csi: comments * command/*_status: clean up text * api/csi: fix stale comments * command/volume: make deregister sound less fearsome * command/plugin_status: set the id length * command/plugin_status_csi: more compact plugin health * command/volume: better error message, comment --- api/allocations.go | 30 +++++++ api/contexts/contexts.go | 2 + api/csi.go | 42 +++++++-- command/agent/config_parse.go | 93 ++++--------------- command/commands.go | 31 +++++++ command/plugin.go | 26 ++++++ command/plugin_status.go | 146 ++++++++++++++++++++++++++++++ command/plugin_status_csi.go | 111 +++++++++++++++++++++++ command/plugin_status_test.go | 57 ++++++++++++ command/status.go | 4 + command/volume.go | 46 ++++++++++ command/volume_deregister.go | 88 ++++++++++++++++++ command/volume_register.go | 130 +++++++++++++++++++++++++++ command/volume_register_csi.go | 44 +++++++++ 
command/volume_register_test.go | 97 ++++++++++++++++++++ command/volume_status.go | 134 ++++++++++++++++++++++++++++ command/volume_status_csi.go | 152 ++++++++++++++++++++++++++++++++ command/volume_status_test.go | 58 ++++++++++++ helper/funcs.go | 74 ++++++++++++++++ nomad/client_rpc.go | 6 +- nomad/csi_endpoint_test.go | 71 +-------------- nomad/search_endpoint.go | 20 +++-- nomad/search_endpoint_test.go | 76 ++++++++++++++++ nomad/state/state_store.go | 44 ++++++++- nomad/structs/structs.go | 2 + nomad/testing.go | 70 +++++++++++++++ 26 files changed, 1486 insertions(+), 168 deletions(-) create mode 100644 command/plugin.go create mode 100644 command/plugin_status.go create mode 100644 command/plugin_status_csi.go create mode 100644 command/plugin_status_test.go create mode 100644 command/volume.go create mode 100644 command/volume_deregister.go create mode 100644 command/volume_register.go create mode 100644 command/volume_register_csi.go create mode 100644 command/volume_register_test.go create mode 100644 command/volume_status.go create mode 100644 command/volume_status_csi.go create mode 100644 command/volume_status_test.go diff --git a/api/allocations.go b/api/allocations.go index 27a6d5b1f..cc2be5ea7 100644 --- a/api/allocations.go +++ b/api/allocations.go @@ -399,6 +399,36 @@ type NodeScoreMeta struct { NormScore float64 } +// Stub returns a list stub for the allocation +func (a *Allocation) Stub() *AllocationListStub { + return &AllocationListStub{ + ID: a.ID, + EvalID: a.EvalID, + Name: a.Name, + Namespace: a.Namespace, + NodeID: a.NodeID, + NodeName: a.NodeName, + JobID: a.JobID, + JobType: *a.Job.Type, + JobVersion: *a.Job.Version, + TaskGroup: a.TaskGroup, + DesiredStatus: a.DesiredStatus, + DesiredDescription: a.DesiredDescription, + ClientStatus: a.ClientStatus, + ClientDescription: a.ClientDescription, + TaskStates: a.TaskStates, + DeploymentStatus: a.DeploymentStatus, + FollowupEvalID: a.FollowupEvalID, + RescheduleTracker: a.RescheduleTracker, + PreemptedAllocations: a.PreemptedAllocations, + PreemptedByAllocation: a.PreemptedByAllocation, + CreateIndex: a.CreateIndex, + ModifyIndex: a.ModifyIndex, + CreateTime: a.CreateTime, + ModifyTime: a.ModifyTime, + } +} + // AllocationListStub is used to return a subset of an allocation // during list operations. 
type AllocationListStub struct { diff --git a/api/contexts/contexts.go b/api/contexts/contexts.go index 51b257c40..ae40db3f8 100644 --- a/api/contexts/contexts.go +++ b/api/contexts/contexts.go @@ -11,5 +11,7 @@ const ( Nodes Context = "nodes" Namespaces Context = "namespaces" Quotas Context = "quotas" + Plugins Context = "plugins" + Volumes Context = "volumes" All Context = "all" ) diff --git a/api/csi.go b/api/csi.go index 556867d4e..8d5883221 100644 --- a/api/csi.go +++ b/api/csi.go @@ -38,6 +38,10 @@ func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, e if err != nil { return nil, nil, err } + + // Cleanup allocation representation for the ui + resp.allocs() + return &resp, qm, nil } @@ -79,19 +83,24 @@ const ( // CSIVolume is used for serialization, see also nomad/structs/csi.go type CSIVolume struct { - ID string - Namespace string - Name string - ExternalID string - Topologies []*CSITopology - AccessMode CSIVolumeAccessMode - AttachmentMode CSIVolumeAttachmentMode + ID string `hcl:"id"` + Name string `hcl:"name"` + ExternalID string `hcl:"external_id"` + Namespace string `hcl:"namespace"` + Topologies []*CSITopology `hcl:"topologies"` + AccessMode CSIVolumeAccessMode `hcl:"access_mode"` + AttachmentMode CSIVolumeAttachmentMode `hcl:"attachment_mode"` - // Combine structs.{Read,Write,Past}Allocs + // Allocations, tracking claim status + ReadAllocs map[string]*Allocation + WriteAllocs map[string]*Allocation + + // Combine structs.{Read,Write}Allocs Allocations []*AllocationListStub + // Schedulable is true if all the denormalized plugin health fields are true Schedulable bool - PluginID string + PluginID string `hcl:"plugin_id"` ControllerRequired bool ControllersHealthy int ControllersExpected int @@ -101,6 +110,20 @@ type CSIVolume struct { CreateIndex uint64 ModifyIndex uint64 + + // ExtraKeysHCL is used by the hcl parser to report unexpected keys + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` +} + +// allocs is called after we query the volume (creating this CSIVolume struct) to collapse +// allocations for the UI +func (v *CSIVolume) allocs() { + for _, a := range v.WriteAllocs { + v.Allocations = append(v.Allocations, a.Stub()) + } + for _, a := range v.ReadAllocs { + v.Allocations = append(v.Allocations, a.Stub()) + } } type CSIVolumeIndexSort []*CSIVolumeListStub @@ -160,6 +183,7 @@ type CSIPlugin struct { // Map Node.ID to CSIInfo fingerprint results Controllers map[string]*CSIInfo Nodes map[string]*CSIInfo + Allocations []*AllocationListStub ControllersHealthy int NodesHealthy int CreateIndex uint64 diff --git a/command/agent/config_parse.go b/command/agent/config_parse.go index 34e192b74..6893abdaf 100644 --- a/command/agent/config_parse.go +++ b/command/agent/config_parse.go @@ -6,11 +6,10 @@ import ( "io" "os" "path/filepath" - "reflect" - "strings" "time" "github.com/hashicorp/hcl" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs/config" ) @@ -99,106 +98,44 @@ func durations(xs []td) error { return nil } -// removeEqualFold removes the first string that EqualFold matches -func removeEqualFold(xs *[]string, search string) { - sl := *xs - for i, x := range sl { - if strings.EqualFold(x, search) { - sl = append(sl[:i], sl[i+1:]...) - if len(sl) == 0 { - *xs = nil - } else { - *xs = sl - } - return - } - } -} - func extraKeys(c *Config) error { // hcl leaves behind extra keys when parsing JSON. These keys // are kept on the top level, taken from slices or the keys of // structs contained in slices. 
Clean up before looking for // extra keys. for range c.HTTPAPIResponseHeaders { - removeEqualFold(&c.ExtraKeysHCL, "http_api_response_headers") + helper.RemoveEqualFold(&c.ExtraKeysHCL, "http_api_response_headers") } for _, p := range c.Plugins { - removeEqualFold(&c.ExtraKeysHCL, p.Name) - removeEqualFold(&c.ExtraKeysHCL, "config") - removeEqualFold(&c.ExtraKeysHCL, "plugin") + helper.RemoveEqualFold(&c.ExtraKeysHCL, p.Name) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "config") + helper.RemoveEqualFold(&c.ExtraKeysHCL, "plugin") } for _, k := range []string{"options", "meta", "chroot_env", "servers", "server_join"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "client") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "client") } // stats is an unused key, continue to silently ignore it - removeEqualFold(&c.Client.ExtraKeysHCL, "stats") + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, "stats") // Remove HostVolume extra keys for _, hv := range c.Client.HostVolumes { - removeEqualFold(&c.Client.ExtraKeysHCL, hv.Name) - removeEqualFold(&c.Client.ExtraKeysHCL, "host_volume") + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, hv.Name) + helper.RemoveEqualFold(&c.Client.ExtraKeysHCL, "host_volume") } for _, k := range []string{"enabled_schedulers", "start_join", "retry_join", "server_join"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "server") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "server") } for _, k := range []string{"datadog_tags"} { - removeEqualFold(&c.ExtraKeysHCL, k) - removeEqualFold(&c.ExtraKeysHCL, "telemetry") + helper.RemoveEqualFold(&c.ExtraKeysHCL, k) + helper.RemoveEqualFold(&c.ExtraKeysHCL, "telemetry") } - return extraKeysImpl([]string{}, reflect.ValueOf(*c)) -} - -// extraKeysImpl returns an error if any extraKeys array is not empty -func extraKeysImpl(path []string, val reflect.Value) error { - stype := val.Type() - for i := 0; i < stype.NumField(); i++ { - ftype := stype.Field(i) - fval := val.Field(i) - - name := ftype.Name - prop := "" - tagSplit(ftype, "hcl", &name, &prop) - - if fval.Kind() == reflect.Ptr { - fval = reflect.Indirect(fval) - } - - // struct? recurse. 
add the struct's key to the path - if fval.Kind() == reflect.Struct { - err := extraKeysImpl(append([]string{name}, path...), fval) - if err != nil { - return err - } - } - - if "unusedKeys" == prop { - if ks, ok := fval.Interface().([]string); ok && len(ks) != 0 { - return fmt.Errorf("%s unexpected keys %s", - strings.Join(path, "."), - strings.Join(ks, ", ")) - } - } - } - return nil -} - -// tagSplit reads the named tag from the structfield and splits its values into strings -func tagSplit(field reflect.StructField, tagName string, vars ...*string) { - tag := strings.Split(field.Tag.Get(tagName), ",") - end := len(tag) - 1 - for i, s := range vars { - if i > end { - return - } - *s = tag[i] - } + return helper.UnusedKeys(c) } diff --git a/command/commands.go b/command/commands.go index ff2100a9d..dbbcec006 100644 --- a/command/commands.go +++ b/command/commands.go @@ -493,6 +493,17 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { }, nil }, + "plugin": func() (cli.Command, error) { + return &PluginCommand{ + Meta: meta, + }, nil + }, + "plugin status": func() (cli.Command, error) { + return &PluginStatusCommand{ + Meta: meta, + }, nil + }, + "quota": func() (cli.Command, error) { return &QuotaCommand{ Meta: meta, @@ -646,6 +657,26 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory { Ui: meta.Ui, }, nil }, + "volume": func() (cli.Command, error) { + return &VolumeCommand{ + Meta: meta, + }, nil + }, + "volume status": func() (cli.Command, error) { + return &VolumeStatusCommand{ + Meta: meta, + }, nil + }, + "volume register": func() (cli.Command, error) { + return &VolumeRegisterCommand{ + Meta: meta, + }, nil + }, + "volume deregister": func() (cli.Command, error) { + return &VolumeDeregisterCommand{ + Meta: meta, + }, nil + }, } deprecated := map[string]cli.CommandFactory{ diff --git a/command/plugin.go b/command/plugin.go new file mode 100644 index 000000000..7128e7cbe --- /dev/null +++ b/command/plugin.go @@ -0,0 +1,26 @@ +package command + +import "github.com/mitchellh/cli" + +type PluginCommand struct { + Meta +} + +func (c *PluginCommand) Help() string { + helpText := ` +Usage nomad plugin status [options] [plugin] + + This command groups subcommands for interacting with plugins. +` + return helpText +} + +func (c *PluginCommand) Synopsis() string { + return "Inspect plugins" +} + +func (c *PluginCommand) Name() string { return "plugin" } + +func (c *PluginCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/plugin_status.go b/command/plugin_status.go new file mode 100644 index 000000000..95eeb5cf6 --- /dev/null +++ b/command/plugin_status.go @@ -0,0 +1,146 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type PluginStatusCommand struct { + Meta + length int + short bool + verbose bool + json bool + template string +} + +func (c *PluginStatusCommand) Help() string { + helpText := ` +Usage nomad plugin status [options] + + Display status information about a plugin. If no plugin id is given, + a list of all plugins will be displayed. + +General Options: + + ` + generalOptionsUsage() + ` + +Status Options: + + -type + List only plugins of type . + + -short + Display short output. + + -verbose + Display full information. + + -json + Output the allocation in its JSON format. + + -t + Format and display allocation using a Go template. 
+` + return helpText +} + +func (c *PluginStatusCommand) Synopsis() string { + return "Display status information about a plugin" +} + +// predictVolumeType is also used in volume_status +var predictVolumeType = complete.PredictFunc(func(a complete.Args) []string { + types := []string{"csi"} + for _, t := range types { + if strings.Contains(t, a.Last) { + return []string{t} + } + } + return nil +}) + +func (c *PluginStatusCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-type": predictVolumeType, + "-short": complete.PredictNothing, + "-verbose": complete.PredictNothing, + "-json": complete.PredictNothing, + "-t": complete.PredictAnything, + }) +} + +func (c *PluginStatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Plugins, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Plugins] + }) +} + +func (c *PluginStatusCommand) Name() string { return "plugin status" } + +func (c *PluginStatusCommand) Run(args []string) int { + var typeArg string + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&typeArg, "type", "", "") + flags.BoolVar(&c.short, "short", false, "") + flags.BoolVar(&c.verbose, "verbose", false, "") + flags.BoolVar(&c.json, "json", false, "") + flags.StringVar(&c.template, "t", "", "") + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + typeArg = strings.ToLower(typeArg) + + // Check that we either got no arguments or exactly one. 
+ args = flags.Args() + if len(args) > 1 { + c.Ui.Error("This command takes either no arguments or one: ") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Truncate the id unless full length is requested + c.length = shortId + if c.verbose { + c.length = fullId + } + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + id := "" + if len(args) == 1 { + id = args[0] + } + + code := c.csiStatus(client, id) + if code != 0 { + return code + } + + // Extend this section with other plugin implementations + + return 0 +} diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go new file mode 100644 index 000000000..ef7d28101 --- /dev/null +++ b/command/plugin_status_csi.go @@ -0,0 +1,111 @@ +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/nomad/api" +) + +func (c *PluginStatusCommand) csiBanner() { + if !(c.json || len(c.template) > 0) { + c.Ui.Output(c.Colorize().Color("[bold]Container Storage Interface[reset]")) + } +} + +func (c *PluginStatusCommand) csiStatus(client *api.Client, id string) int { + if id == "" { + c.csiBanner() + plugs, _, err := client.CSIPlugins().List(nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying CSI plugins: %s", err)) + return 1 + } + + if len(plugs) == 0 { + // No output if we have no plugins + c.Ui.Error("No CSI plugins") + } else { + str, err := c.csiFormatPlugins(plugs) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) + return 1 + } + c.Ui.Output(str) + } + return 0 + } + + // Lookup matched a single plugin + plug, _, err := client.CSIPlugins().Info(id, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying plugin: %s", err)) + return 1 + } + + str, err := c.csiFormatPlugin(plug) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting plugin: %s", err)) + return 1 + } + + c.Ui.Output(str) + return 0 +} + +func (c *PluginStatusCommand) csiFormatPlugins(plugs []*api.CSIPluginListStub) (string, error) { + // Sort the output by quota name + sort.Slice(plugs, func(i, j int) bool { return plugs[i].ID < plugs[j].ID }) + + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, plugs) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + // TODO(langmartin) add Provider https://github.com/hashicorp/nomad/issues/7248 + rows := make([]string, len(plugs)+1) + rows[0] = "ID|Controllers Healthy/Expected|Nodes Healthy/Expected" + for i, p := range plugs { + rows[i+1] = fmt.Sprintf("%s|%d/%d|%d/%d", + limit(p.ID, c.length), + p.ControllersHealthy, + p.ControllersExpected, + p.NodesHealthy, + p.NodesExpected, + ) + } + return formatList(rows), nil +} + +func (c *PluginStatusCommand) csiFormatPlugin(plug *api.CSIPlugin) (string, error) { + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, plug) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + output := []string{ + fmt.Sprintf("ID|%s", plug.ID), + fmt.Sprintf("Controllers Healthy|%d", plug.ControllersHealthy), + fmt.Sprintf("Controllers Expected|%d", len(plug.Controllers)), + fmt.Sprintf("Nodes Healthy|%d", plug.NodesHealthy), + fmt.Sprintf("Nodes Expected|%d", len(plug.Nodes)), + } + + // Exit early + if c.short { + return formatKV(output), nil + } + + // Format the allocs + banner := c.Colorize().Color("\n[bold]Allocations[reset]") + allocs := formatAllocListStubs(plug.Allocations, 
c.verbose, c.length) + full := []string{formatKV(output), banner, allocs} + return strings.Join(full, "\n"), nil +} diff --git a/command/plugin_status_test.go b/command/plugin_status_test.go new file mode 100644 index 000000000..cdf38e67a --- /dev/null +++ b/command/plugin_status_test.go @@ -0,0 +1,57 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/go-memdb" + "github.com/hashicorp/nomad/nomad" + "github.com/mitchellh/cli" + "github.com/posener/complete" + "github.com/stretchr/testify/require" +) + +func TestPluginStatusCommand_Implements(t *testing.T) { + t.Parallel() + var _ cli.Command = &PluginStatusCommand{} +} + +func TestPluginStatusCommand_Fails(t *testing.T) { + t.Parallel() + ui := new(cli.MockUi) + cmd := &PluginStatusCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + code := cmd.Run([]string{"some", "bad", "args"}) + require.Equal(t, 1, code) + + out := ui.ErrorWriter.String() + require.Contains(t, out, commandErrorText(cmd)) + ui.ErrorWriter.Reset() +} + +func TestPluginStatusCommand_AutocompleteArgs(t *testing.T) { + t.Parallel() + + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + + ui := new(cli.MockUi) + cmd := &PluginStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}} + + // Create a plugin + id := "long-plugin-id" + state := srv.Agent.Server().State() + cleanup := nomad.CreateTestCSIPlugin(state, id) + defer cleanup() + ws := memdb.NewWatchSet() + plug, err := state.CSIPluginByID(ws, id) + require.NoError(t, err) + + prefix := plug.ID[:len(plug.ID)-5] + args := complete.Args{Last: prefix} + predictor := cmd.AutocompleteArgs() + + res := predictor.Predict(args) + require.Equal(t, 1, len(res)) + require.Equal(t, plug.ID, res[0]) +} diff --git a/command/status.go b/command/status.go index 4cf028b39..c67a73f62 100644 --- a/command/status.go +++ b/command/status.go @@ -162,6 +162,10 @@ func (c *StatusCommand) Run(args []string) int { cmd = &NamespaceStatusCommand{Meta: c.Meta} case contexts.Quotas: cmd = &QuotaStatusCommand{Meta: c.Meta} + case contexts.Plugins: + cmd = &PluginStatusCommand{Meta: c.Meta} + case contexts.Volumes: + cmd = &VolumeStatusCommand{Meta: c.Meta} default: c.Ui.Error(fmt.Sprintf("Unable to resolve ID: %q", id)) return 1 diff --git a/command/volume.go b/command/volume.go new file mode 100644 index 000000000..83eb44093 --- /dev/null +++ b/command/volume.go @@ -0,0 +1,46 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +type VolumeCommand struct { + Meta +} + +func (c *VolumeCommand) Help() string { + helpText := ` +Usage: nomad volume [options] + + volume groups commands that interact with volumes. + + Register a new volume or update an existing volume: + + $ nomad volume register + + Examine the status of a volume: + + $ nomad volume status + + Deregister an unused volume: + + $ nomad volume deregister + + Please see the individual subcommand help for detailed usage information. 
+` + return strings.TrimSpace(helpText) +} + +func (c *VolumeCommand) Name() string { + return "volume" +} + +func (c *VolumeCommand) Synopsis() string { + return "Interact with volumes" +} + +func (c *VolumeCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/volume_deregister.go b/command/volume_deregister.go new file mode 100644 index 000000000..eafb14ea6 --- /dev/null +++ b/command/volume_deregister.go @@ -0,0 +1,88 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type VolumeDeregisterCommand struct { + Meta +} + +func (c *VolumeDeregisterCommand) Help() string { + helpText := ` +Usage: nomad volume deregister [options] + + Remove an unused volume from Nomad. + +General Options: + + ` + generalOptionsUsage() + + return strings.TrimSpace(helpText) +} + +func (c *VolumeDeregisterCommand) AutocompleteFlags() complete.Flags { + return c.Meta.AutocompleteFlags(FlagSetClient) +} + +func (c *VolumeDeregisterCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + // When multiple volume types are implemented, this search should merge contexts + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Volumes, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Volumes] + }) +} + +func (c *VolumeDeregisterCommand) Synopsis() string { + return "Remove a volume" +} + +func (c *VolumeDeregisterCommand) Name() string { return "volume deregister" } + +func (c *VolumeDeregisterCommand) Run(args []string) int { + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + // Check that we get exactly one argument + args = flags.Args() + if l := len(args); l != 1 { + c.Ui.Error("This command takes one argument: ") + c.Ui.Error(commandErrorText(c)) + return 1 + } + volID := args[0] + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + // Deregister only works on CSI volumes, but could be extended to support other + // network interfaces or host volumes + err = client.CSIVolumes().Deregister(volID, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error deregistering volume: %s", err)) + return 1 + } + + return 0 +} diff --git a/command/volume_register.go b/command/volume_register.go new file mode 100644 index 000000000..83d0307ba --- /dev/null +++ b/command/volume_register.go @@ -0,0 +1,130 @@ +package command + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/posener/complete" +) + +type VolumeRegisterCommand struct { + Meta +} + +func (c *VolumeRegisterCommand) Help() string { + helpText := ` +Usage: nomad volume register [options] + + Creates or updates a volume in Nomad. The volume must exist on the remote + storage provider before it can be used by a task. + + If the supplied path is "-" the volume file is read from stdin. Otherwise, it + is read from the file at the supplied path. 
+ +General Options: + + ` + generalOptionsUsage() + + return strings.TrimSpace(helpText) +} + +func (c *VolumeRegisterCommand) AutocompleteFlags() complete.Flags { + return c.Meta.AutocompleteFlags(FlagSetClient) +} + +func (c *VolumeRegisterCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFiles("*") +} + +func (c *VolumeRegisterCommand) Synopsis() string { + return "Create or update a volume" +} + +func (c *VolumeRegisterCommand) Name() string { return "volume register" } + +func (c *VolumeRegisterCommand) Run(args []string) int { + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + // Check that we get exactly one argument + args = flags.Args() + if l := len(args); l != 1 { + c.Ui.Error("This command takes one argument: ") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Read the file contents + file := args[0] + var rawVolume []byte + var err error + if file == "-" { + rawVolume, err = ioutil.ReadAll(os.Stdin) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to read stdin: %v", err)) + return 1 + } + } else { + rawVolume, err = ioutil.ReadFile(file) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to read file: %v", err)) + return 1 + } + } + + ast, volType, err := parseVolumeType(string(rawVolume)) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing the volume type: %s", err)) + return 1 + } + volType = strings.ToLower(volType) + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + switch volType { + case "csi": + code := c.csiRegister(client, ast) + if code != 0 { + return code + } + default: + c.Ui.Error(fmt.Sprintf("Error unknown volume type: %s", volType)) + return 1 + } + + return 0 +} + +// parseVolume is used to parse the quota specification from HCL +func parseVolumeType(input string) (*ast.File, string, error) { + // Parse the AST first + ast, err := hcl.Parse(input) + if err != nil { + return nil, "", fmt.Errorf("parse error: %v", err) + } + + // Decode the type, so we can dispatch on it + dispatch := &struct { + T string `hcl:"type"` + }{} + err = hcl.DecodeObject(dispatch, ast) + if err != nil { + return nil, "", fmt.Errorf("dispatch error: %v", err) + } + + return ast, dispatch.T, nil +} diff --git a/command/volume_register_csi.go b/command/volume_register_csi.go new file mode 100644 index 000000000..a7f74f2f3 --- /dev/null +++ b/command/volume_register_csi.go @@ -0,0 +1,44 @@ +package command + +import ( + "fmt" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" +) + +func (c *VolumeRegisterCommand) csiRegister(client *api.Client, ast *ast.File) int { + vol, err := csiDecodeVolume(ast) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error decoding the volume definition: %s", err)) + return 1 + } + _, err = client.CSIVolumes().Register(vol, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error registering volume: %s", err)) + return 1 + } + + return 0 +} + +// parseVolume is used to parse the quota specification from HCL +func csiDecodeVolume(input *ast.File) (*api.CSIVolume, error) { + output := &api.CSIVolume{} + err := hcl.DecodeObject(output, input) + if err != nil { + return nil, err + } + + // api.CSIVolume doesn't have the type field, it's used only for dispatch in 
+ // parseVolumeType + helper.RemoveEqualFold(&output.ExtraKeysHCL, "type") + err = helper.UnusedKeys(output) + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/command/volume_register_test.go b/command/volume_register_test.go new file mode 100644 index 000000000..d707f6171 --- /dev/null +++ b/command/volume_register_test.go @@ -0,0 +1,97 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/hcl" + "github.com/hashicorp/nomad/api" + "github.com/stretchr/testify/require" +) + +func TestVolumeDispatchParse(t *testing.T) { + t.Parallel() + + cases := []struct { + hcl string + t string + err string + }{{ + hcl: ` +type = "foo" +rando = "bar" +`, + t: "foo", + err: "", + }, { + hcl: `{"id": "foo", "type": "foo", "other": "bar"}`, + t: "foo", + err: "", + }} + + for _, c := range cases { + t.Run(c.hcl, func(t *testing.T) { + _, s, err := parseVolumeType(c.hcl) + require.Equal(t, c.t, s) + if c.err == "" { + require.NoError(t, err) + } else { + require.Contains(t, err.Error(), c.err) + } + + }) + } +} + +func TestCSIVolumeParse(t *testing.T) { + t.Parallel() + + cases := []struct { + hcl string + q *api.CSIVolume + err string + }{{ + hcl: ` +id = "foo" +type = "csi" +namespace = "n" +access_mode = "single-node-writer" +attachment_mode = "file-system" +plugin_id = "p" +`, + q: &api.CSIVolume{ + ID: "foo", + Namespace: "n", + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "p", + }, + err: "", + }, { + hcl: ` +{"id": "foo", "namespace": "n", "type": "csi", "access_mode": "single-node-writer", "attachment_mode": "file-system", +"plugin_id": "p"} +`, + q: &api.CSIVolume{ + ID: "foo", + Namespace: "n", + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "p", + }, + err: "", + }} + + for _, c := range cases { + t.Run(c.hcl, func(t *testing.T) { + ast, err := hcl.ParseString(c.hcl) + require.NoError(t, err) + vol, err := csiDecodeVolume(ast) + require.Equal(t, c.q, vol) + if c.err == "" { + require.NoError(t, err) + } else { + require.Contains(t, err.Error(), c.err) + } + }) + } +} diff --git a/command/volume_status.go b/command/volume_status.go new file mode 100644 index 000000000..a13be974f --- /dev/null +++ b/command/volume_status.go @@ -0,0 +1,134 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/nomad/api/contexts" + "github.com/posener/complete" +) + +type VolumeStatusCommand struct { + Meta + length int + short bool + verbose bool + json bool + template string +} + +func (c *VolumeStatusCommand) Help() string { + helpText := ` +Usage: nomad volume status [options] + + Display status information about a CSI volume. If no volume id is given, a + list of all volumes will be displayed. + +General Options: + + ` + generalOptionsUsage() + ` + +Status Options: + + -type + List only volumes of type . + + -short + Display short output. Used only when a single volume is being + queried, and drops verbose information about allocations. + + -verbose + Display full allocation information. + + -json + Output the allocation in its JSON format. + + -t + Format and display allocation using a Go template. 
+` + return strings.TrimSpace(helpText) +} + +func (c *VolumeStatusCommand) Synopsis() string { + return "Display status information about a volume" +} + +func (c *VolumeStatusCommand) AutocompleteFlags() complete.Flags { + return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), + complete.Flags{ + "-type": predictVolumeType, + "-short": complete.PredictNothing, + "-verbose": complete.PredictNothing, + "-json": complete.PredictNothing, + "-t": complete.PredictAnything, + }) +} + +func (c *VolumeStatusCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + client, err := c.Meta.Client() + if err != nil { + return nil + } + + resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Volumes, nil) + if err != nil { + return []string{} + } + return resp.Matches[contexts.Volumes] + }) +} + +func (c *VolumeStatusCommand) Name() string { return "volume status" } + +func (c *VolumeStatusCommand) Run(args []string) int { + var typeArg string + + flags := c.Meta.FlagSet(c.Name(), FlagSetClient) + flags.Usage = func() { c.Ui.Output(c.Help()) } + flags.StringVar(&typeArg, "type", "", "") + flags.BoolVar(&c.short, "short", false, "") + flags.BoolVar(&c.verbose, "verbose", false, "") + flags.BoolVar(&c.json, "json", false, "") + flags.StringVar(&c.template, "t", "", "") + + if err := flags.Parse(args); err != nil { + c.Ui.Error(fmt.Sprintf("Error parsing arguments %s", err)) + return 1 + } + + // Check that we either got no arguments or exactly one + args = flags.Args() + if len(args) > 1 { + c.Ui.Error("This command takes either no arguments or one: ") + c.Ui.Error(commandErrorText(c)) + return 1 + } + + // Truncate the id unless full length is requested + c.length = shortId + if c.verbose { + c.length = fullId + } + + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 1 + } + + id := "" + if len(args) == 1 { + id = args[0] + } + + code := c.csiStatus(client, id) + if code != 0 { + return code + } + + // Extend this section with other volume implementations + + return 0 +} diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go new file mode 100644 index 000000000..27b9dd566 --- /dev/null +++ b/command/volume_status_csi.go @@ -0,0 +1,152 @@ +package command + +import ( + "fmt" + "sort" + "strings" + + "github.com/hashicorp/nomad/api" +) + +func (c *VolumeStatusCommand) csiBanner() { + if !(c.json || len(c.template) > 0) { + c.Ui.Output(c.Colorize().Color("[bold]Container Storage Interface[reset]")) + } +} + +func (c *VolumeStatusCommand) csiStatus(client *api.Client, id string) int { + // Invoke list mode if no volume id + if id == "" { + c.csiBanner() + vols, _, err := client.CSIVolumes().List(nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying volumes: %s", err)) + return 1 + } + + if len(vols) == 0 { + // No output if we have no volumes + c.Ui.Error("No CSI volumes") + } else { + str, err := c.csiFormatVolumes(vols) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting: %s", err)) + return 1 + } + c.Ui.Output(str) + } + return 0 + } + + // Try querying the volume + vol, _, err := client.CSIVolumes().Info(id, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying volume: %s", err)) + return 1 + } + + str, err := c.formatBasic(vol) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error formatting volume: %s", err)) + return 1 + } + c.Ui.Output(str) + + return 0 +} + +func (c 
*VolumeStatusCommand) csiFormatVolumes(vols []*api.CSIVolumeListStub) (string, error) { + // Sort the output by volume id + sort.Slice(vols, func(i, j int) bool { return vols[i].ID < vols[j].ID }) + + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, vols) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + rows := make([]string, len(vols)+1) + rows[0] = "ID|Name|Plugin ID|Schedulable|Access Mode" + for i, v := range vols { + rows[i+1] = fmt.Sprintf("%s|%s|%s|%t|%s", + limit(v.ID, c.length), + v.Name, + v.PluginID, + v.Schedulable, + v.AccessMode, + ) + } + return formatList(rows), nil +} + +func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { + if c.json || len(c.template) > 0 { + out, err := Format(c.json, c.template, vol) + if err != nil { + return "", fmt.Errorf("format error: %v", err) + } + return out, nil + } + + // TODO(langmartin) add Provider https://github.com/hashicorp/nomad/issues/7248 + output := []string{ + fmt.Sprintf("ID|%s", vol.ID), + fmt.Sprintf("Name|%s", vol.Name), + fmt.Sprintf("External ID|%s", vol.ExternalID), + + fmt.Sprintf("Schedulable|%t", vol.Schedulable), + fmt.Sprintf("Controllers Healthy|%d", vol.ControllersHealthy), + fmt.Sprintf("Controllers Expected|%d", vol.ControllersExpected), + fmt.Sprintf("Nodes Healthy|%d", vol.NodesHealthy), + fmt.Sprintf("Nodes Expected|%d", vol.NodesExpected), + + fmt.Sprintf("Access Mode|%s", vol.AccessMode), + fmt.Sprintf("Attachment Mode|%s", vol.AttachmentMode), + fmt.Sprintf("Namespace|%s", vol.Namespace), + } + + // Exit early + if c.short { + return formatKV(output), nil + } + + // Format the allocs + banner := c.Colorize().Color("\n[bold]Allocations[reset]") + allocs := formatAllocListStubs(vol.Allocations, c.verbose, c.length) + full := []string{formatKV(output), banner, allocs} + return strings.Join(full, "\n"), nil +} + +func (c *VolumeStatusCommand) formatTopologies(vol *api.CSIVolume) string { + var out []string + + // Find the union of all the keys + head := map[string]string{} + for _, t := range vol.Topologies { + for key := range t.Segments { + if _, ok := head[key]; !ok { + head[key] = "" + } + } + } + + // Append the header + var line []string + for key := range head { + line = append(line, key) + } + out = append(out, strings.Join(line, " ")) + + // Append each topology + for _, t := range vol.Topologies { + line = []string{} + for key := range head { + line = append(line, t.Segments[key]) + } + out = append(out, strings.Join(line, " ")) + } + + return strings.Join(out, "\n") +} diff --git a/command/volume_status_test.go b/command/volume_status_test.go new file mode 100644 index 000000000..a3c6a5b20 --- /dev/null +++ b/command/volume_status_test.go @@ -0,0 +1,58 @@ +package command + +import ( + "testing" + + "github.com/hashicorp/nomad/helper/uuid" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/cli" + "github.com/posener/complete" + "github.com/stretchr/testify/require" +) + +func TestCSIVolumeStatusCommand_Implements(t *testing.T) { + t.Parallel() + var _ cli.Command = &VolumeStatusCommand{} +} + +func TestCSIVolumeStatusCommand_Fails(t *testing.T) { + t.Parallel() + ui := new(cli.MockUi) + cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui}} + + // Fails on misuse + code := cmd.Run([]string{"some", "bad", "args"}) + require.Equal(t, 1, code) + + out := ui.ErrorWriter.String() + require.Contains(t, out, commandErrorText(cmd)) + ui.ErrorWriter.Reset() +} + +func 
TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { + t.Parallel() + + srv, _, url := testServer(t, true, nil) + defer srv.Shutdown() + + ui := new(cli.MockUi) + cmd := &VolumeStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}} + + state := srv.Agent.Server().State() + + vol := &structs.CSIVolume{ + ID: uuid.Generate(), + Namespace: "default", + PluginID: "glade", + } + + require.NoError(t, state.CSIVolumeRegister(1000, []*structs.CSIVolume{vol})) + + prefix := vol.ID[:len(vol.ID)-5] + args := complete.Args{Last: prefix} + predictor := cmd.AutocompleteArgs() + + res := predictor.Predict(args) + require.Equal(t, 1, len(res)) + require.Equal(t, vol.ID, res[0]) +} diff --git a/helper/funcs.go b/helper/funcs.go index 7a6b4c151..c75294a1b 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -3,7 +3,9 @@ package helper import ( "crypto/sha512" "fmt" + "reflect" "regexp" + "strings" "time" multierror "github.com/hashicorp/go-multierror" @@ -387,3 +389,75 @@ func CheckHCLKeys(node ast.Node, valid []string) error { return result } + +// UnusedKeys returns a pretty-printed error if any `hcl:",unusedKeys"` is not empty +func UnusedKeys(obj interface{}) error { + val := reflect.ValueOf(obj) + if val.Kind() == reflect.Ptr { + val = reflect.Indirect(val) + } + return unusedKeysImpl([]string{}, val) +} + +func unusedKeysImpl(path []string, val reflect.Value) error { + stype := val.Type() + for i := 0; i < stype.NumField(); i++ { + ftype := stype.Field(i) + fval := val.Field(i) + tags := strings.Split(ftype.Tag.Get("hcl"), ",") + name := tags[0] + tags = tags[1:] + + if fval.Kind() == reflect.Ptr { + fval = reflect.Indirect(fval) + } + + // struct? recurse. Add the struct's key to the path + if fval.Kind() == reflect.Struct { + err := unusedKeysImpl(append([]string{name}, path...), fval) + if err != nil { + return err + } + continue + } + + // Search the hcl tags for "unusedKeys" + unusedKeys := false + for _, p := range tags { + if p == "unusedKeys" { + unusedKeys = true + break + } + } + + if unusedKeys { + ks, ok := fval.Interface().([]string) + if ok && len(ks) != 0 { + ps := "" + if len(path) > 0 { + ps = strings.Join(path, ".") + " " + } + return fmt.Errorf("%sunexpected keys %s", + ps, + strings.Join(ks, ", ")) + } + } + } + return nil +} + +// RemoveEqualFold removes the first string that EqualFold matches. It updates xs in place +func RemoveEqualFold(xs *[]string, search string) { + sl := *xs + for i, x := range sl { + if strings.EqualFold(x, search) { + sl = append(sl[:i], sl[i+1:]...) 
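 // Write the shortened slice back through xs; an empty result becomes nil.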
+ if len(sl) == 0 { + *xs = nil + } else { + *xs = sl + } + return + } + } +} diff --git a/nomad/client_rpc.go b/nomad/client_rpc.go index ca8db2336..0c5d611e5 100644 --- a/nomad/client_rpc.go +++ b/nomad/client_rpc.go @@ -219,20 +219,20 @@ func NodeRpc(session *yamux.Session, method string, args, reply interface{}) err // Open a new session stream, err := session.Open() if err != nil { - return err + return fmt.Errorf("session open: %v", err) } defer stream.Close() // Write the RpcNomad byte to set the mode if _, err := stream.Write([]byte{byte(pool.RpcNomad)}); err != nil { stream.Close() - return err + return fmt.Errorf("set mode: %v", err) } // Make the RPC err = msgpackrpc.CallWithCodec(pool.NewClientCodec(stream), method, args, reply) if err != nil { - return err + return fmt.Errorf("rpc call: %v", err) } return nil diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index fedb37801..dbd33ad9a 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" - "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/require" @@ -483,7 +482,7 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { ns := structs.DefaultNamespace - deleteNodes := CreateTestPlugin(srv.fsm.State(), "foo") + deleteNodes := CreateTestCSIPlugin(srv.fsm.State(), "foo") defer deleteNodes() state := srv.fsm.State() @@ -527,74 +526,6 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { require.Nil(t, resp2.Plugin) } -// CreateTestPlugin is a helper that generates the node + fingerprint results necessary to -// create a CSIPlugin by directly inserting into the state store. 
It's exported for use in -// other test packages -func CreateTestPlugin(s *state.StateStore, id string) func() { - // Create some nodes - ns := make([]*structs.Node, 3) - for i := range ns { - n := mock.Node() - ns[i] = n - } - - // Install healthy plugin fingerprinting results - ns[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ - id: { - PluginID: id, - AllocID: uuid.Generate(), - Healthy: true, - HealthDescription: "healthy", - RequiresControllerPlugin: true, - RequiresTopologies: false, - ControllerInfo: &structs.CSIControllerInfo{ - SupportsReadOnlyAttach: true, - SupportsAttachDetach: true, - SupportsListVolumes: true, - SupportsListVolumesAttachedNodes: false, - }, - }, - } - - // Install healthy plugin fingerprinting results - allocID := uuid.Generate() - for _, n := range ns[1:] { - n.CSINodePlugins = map[string]*structs.CSIInfo{ - id: { - PluginID: id, - AllocID: allocID, - Healthy: true, - HealthDescription: "healthy", - RequiresControllerPlugin: true, - RequiresTopologies: false, - NodeInfo: &structs.CSINodeInfo{ - ID: n.ID, - MaxVolumes: 64, - RequiresNodeStageVolume: true, - }, - }, - } - } - - // Insert them into the state store - index := uint64(999) - for _, n := range ns { - index++ - s.UpsertNode(index, n) - } - - // Return cleanup function that deletes the nodes - return func() { - ids := make([]string, len(ns)) - for i, n := range ns { - ids[i] = n.ID - } - - index++ - s.DeleteNode(index, ids) - } -} - func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { srv, shutdown := TestServer(t, func(c *Config) {}) defer shutdown() diff --git a/nomad/search_endpoint.go b/nomad/search_endpoint.go index 4e1b3acfa..3e5eea504 100644 --- a/nomad/search_endpoint.go +++ b/nomad/search_endpoint.go @@ -29,6 +29,8 @@ var ( structs.Nodes, structs.Evals, structs.Deployments, + structs.Plugins, + structs.Volumes, } ) @@ -52,15 +54,19 @@ func (s *Search) getMatches(iter memdb.ResultIterator, prefix string) ([]string, var id string switch t := raw.(type) { case *structs.Job: - id = raw.(*structs.Job).ID + id = t.ID case *structs.Evaluation: - id = raw.(*structs.Evaluation).ID + id = t.ID case *structs.Allocation: - id = raw.(*structs.Allocation).ID + id = t.ID case *structs.Node: - id = raw.(*structs.Node).ID + id = t.ID case *structs.Deployment: - id = raw.(*structs.Deployment).ID + id = t.ID + case *structs.CSIPlugin: + id = t.ID + case *structs.CSIVolume: + id = t.ID default: matchID, ok := getEnterpriseMatch(raw) if !ok { @@ -95,6 +101,10 @@ func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix return state.NodesByIDPrefix(ws, prefix) case structs.Deployments: return state.DeploymentsByIDPrefix(ws, namespace, prefix) + case structs.Plugins: + return state.CSIPluginsByIDPrefix(ws, prefix) + case structs.Volumes: + return state.CSIVolumesByIDPrefix(ws, namespace, prefix) default: return getEnterpriseResourceIter(context, aclObj, namespace, prefix, ws, state) } diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go index 31c3ae72c..3ee3d3afb 100644 --- a/nomad/search_endpoint_test.go +++ b/nomad/search_endpoint_test.go @@ -7,10 +7,12 @@ import ( msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) const jobIndex = 1000 @@ -746,3 +748,77 @@ func 
TestSearch_PrefixSearch_MultiRegion(t *testing.T) { assert.Equal(job.ID, resp.Matches[structs.Jobs][0]) assert.Equal(uint64(jobIndex), resp.Index) } + +func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + s, cleanupS := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 + }) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + id := uuid.Generate() + CreateTestCSIPlugin(s.fsm.State(), id) + + prefix := id[:len(id)-2] + + req := &structs.SearchRequest{ + Prefix: prefix, + Context: structs.Plugins, + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + } + + var resp structs.SearchResponse + if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + assert.Equal(1, len(resp.Matches[structs.Plugins])) + assert.Equal(id, resp.Matches[structs.Plugins][0]) + assert.Equal(resp.Truncations[structs.Plugins], false) +} + +func TestSearch_PrefixSearch_CSIVolume(t *testing.T) { + t.Parallel() + assert := assert.New(t) + + s, cleanupS := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 + }) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + id := uuid.Generate() + err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{ + ID: id, + Namespace: structs.DefaultNamespace, + PluginID: "glade", + }}) + require.NoError(t, err) + + prefix := id[:len(id)-2] + + req := &structs.SearchRequest{ + Prefix: prefix, + Context: structs.Volumes, + QueryOptions: structs.QueryOptions{ + Region: "global", + Namespace: structs.DefaultNamespace, + }, + } + + var resp structs.SearchResponse + if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil { + t.Fatalf("err: %v", err) + } + + assert.Equal(1, len(resp.Matches[structs.Volumes])) + assert.Equal(id, resp.Matches[structs.Volumes][0]) + assert.Equal(resp.Truncations[structs.Volumes], false) +} diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 2b1567d7a..c52653dee 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1657,7 +1657,7 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVolume, error) { txn := s.db.Txn(false) - watchCh, obj, err := txn.FirstWatch("csi_volumes", "id", id) + watchCh, obj, err := txn.FirstWatch("csi_volumes", "id_prefix", id) if err != nil { return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) } @@ -1684,6 +1684,30 @@ func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, pluginID string) (m return iter, nil } +// CSIVolumesByIDPrefix supports search +func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID string) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_volumes", "id_prefix", volumeID) + if err != nil { + return nil, err + } + + ws.Add(iter.WatchCh()) + + // Filter the iterator by namespace + f := func(raw interface{}) bool { + v, ok := raw.(*structs.CSIVolume) + if !ok { + return false + } + return v.Namespace != namespace + } + + wrap := memdb.NewFilterIterator(iter, f) + return wrap, nil +} + // CSIVolumes looks up the entire csi_volumes table func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) { txn := s.db.Txn(false) @@ -1741,7 +1765,7 @@ func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { defer txn.Abort() for 
_, id := range ids { - existing, err := txn.First("csi_volumes", "id", id) + existing, err := txn.First("csi_volumes", "id_prefix", id) if err != nil { return fmt.Errorf("volume lookup failed: %s: %v", id, err) } @@ -1837,12 +1861,26 @@ func (s *StateStore) CSIPlugins(ws memdb.WatchSet) (memdb.ResultIterator, error) return iter, nil } +// CSIPluginsByIDPrefix supports search +func (s *StateStore) CSIPluginsByIDPrefix(ws memdb.WatchSet, pluginID string) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_plugins", "id_prefix", pluginID) + if err != nil { + return nil, err + } + + ws.Add(iter.WatchCh()) + + return iter, nil +} + // CSIPluginByID returns the one named CSIPlugin func (s *StateStore) CSIPluginByID(ws memdb.WatchSet, id string) (*structs.CSIPlugin, error) { txn := s.db.Txn(false) defer txn.Abort() - raw, err := txn.First("csi_plugins", "id", id) + raw, err := txn.First("csi_plugins", "id_prefix", id) if err != nil { return nil, fmt.Errorf("csi_plugin lookup failed: %s %v", id, err) } diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 7a758605a..3c1fa2b6c 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -163,6 +163,8 @@ const ( Namespaces Context = "namespaces" Quotas Context = "quotas" All Context = "all" + Plugins Context = "plugins" + Volumes Context = "volumes" ) // NamespacedID is a tuple of an ID and a namespace diff --git a/nomad/testing.go b/nomad/testing.go index 3beeeb370..c5593eeaa 100644 --- a/nomad/testing.go +++ b/nomad/testing.go @@ -15,7 +15,9 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/catalog" "github.com/hashicorp/nomad/helper/pluginutils/singleton" "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" + "github.com/hashicorp/nomad/nomad/state" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/version" ) @@ -154,3 +156,71 @@ func TestJoin(t testing.T, s1 *Server, other ...*Server) { } } } + +// CreateTestPlugin is a helper that generates the node + fingerprint results necessary to +// create a CSIPlugin by directly inserting into the state store. 
It's exported for use in +// other test packages +func CreateTestCSIPlugin(s *state.StateStore, id string) func() { + // Create some nodes + ns := make([]*structs.Node, 3) + for i := range ns { + n := mock.Node() + ns[i] = n + } + + // Install healthy plugin fingerprinting results + ns[0].CSIControllerPlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: uuid.Generate(), + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsAttachDetach: true, + SupportsListVolumes: true, + SupportsListVolumesAttachedNodes: false, + }, + }, + } + + // Install healthy plugin fingerprinting results + allocID := uuid.Generate() + for _, n := range ns[1:] { + n.CSINodePlugins = map[string]*structs.CSIInfo{ + id: { + PluginID: id, + AllocID: allocID, + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: true, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: n.ID, + MaxVolumes: 64, + RequiresNodeStageVolume: true, + }, + }, + } + } + + // Insert them into the state store + index := uint64(999) + for _, n := range ns { + index++ + s.UpsertNode(index, n) + } + + // Return cleanup function that deletes the nodes + return func() { + ids := make([]string, len(ns)) + for i, n := range ns { + ids[i] = n.ID + } + + index++ + s.DeleteNode(index, ids) + } +} From de4ad6ca38dc31a682ed0461fed114d4702ceacc Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 9 Mar 2020 09:57:59 -0400 Subject: [PATCH 107/126] csi: add Provider field to CSI CLIs and APIs (#7285) Derive a provider name and version for plugins (and the volumes that use them) from the CSI identity API `GetPluginInfo`. Expose the vendor name as `Provider` in the API and CLI commands. 
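The mapping is direct: `Provider` comes from the `name` field and `ProviderVersion` from the `vendor_version` field of the identity service's `GetPluginInfoResponse`. A minimal sketch of that lookup, assuming an already-dialed gRPC connection to the plugin's socket (the helper and package names here are illustrative):

    package example

    import (
    	"context"

    	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
    	"google.golang.org/grpc"
    )

    // providerInfo returns the vendor name and version that Nomad records as
    // Provider and ProviderVersion. Sketch only: it assumes conn is an
    // established gRPC connection to the plugin's CSI socket.
    func providerInfo(ctx context.Context, conn *grpc.ClientConn) (string, string, error) {
    	idClient := csipbv1.NewIdentityClient(conn)
    	resp, err := idClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{})
    	if err != nil {
    		return "", "", err
    	}
    	// The real client treats an empty name as an error; the sketch just
    	// returns whatever the plugin reported.
    	return resp.GetName(), resp.GetVendorVersion(), nil
    }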
--- api/csi.go | 6 ++++ .../taskrunner/plugin_supervisor_hook.go | 17 ++++++++++- .../pluginmanager/csimanager/fingerprint.go | 2 ++ command/alloc_status.go | 5 ++-- command/plugin_status_csi.go | 8 ++++-- command/volume_status_csi.go | 5 ++-- nomad/csi_endpoint.go | 20 +++++++++---- nomad/state/state_store.go | 4 +++ nomad/structs/csi.go | 8 ++++++ nomad/structs/node.go | 3 ++ plugins/csi/client.go | 28 +++++++++++-------- plugins/csi/client_test.go | 25 ++++++++++------- plugins/csi/fake/client.go | 11 ++++---- plugins/csi/plugin.go | 3 +- 14 files changed, 104 insertions(+), 41 deletions(-) diff --git a/api/csi.go b/api/csi.go index 8d5883221..a97b797e1 100644 --- a/api/csi.go +++ b/api/csi.go @@ -101,6 +101,8 @@ type CSIVolume struct { // Schedulable is true if all the denormalized plugin health fields are true Schedulable bool PluginID string `hcl:"plugin_id"` + Provider string + ProviderVersion string ControllerRequired bool ControllersHealthy int ControllersExpected int @@ -151,6 +153,7 @@ type CSIVolumeListStub struct { AttachmentMode CSIVolumeAttachmentMode Schedulable bool PluginID string + Provider string ControllerRequired bool ControllersHealthy int ControllersExpected int @@ -179,6 +182,8 @@ type CSIPlugins struct { type CSIPlugin struct { ID string + Provider string + Version string ControllerRequired bool // Map Node.ID to CSIInfo fingerprint results Controllers map[string]*CSIInfo @@ -192,6 +197,7 @@ type CSIPlugin struct { type CSIPluginListStub struct { ID string + Provider string ControllerRequired bool ControllersHealthy int ControllersExpected int diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index f44ce5bf1..6f3a3b8b3 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -256,16 +256,31 @@ WAITFORREADY: } func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), error) { + + // At this point we know the plugin is ready and we can fingerprint it + // to get its vendor name and version + client, err := csi.NewClient(socketPath, h.logger.Named("csi_client").With("plugin.name", h.task.CSIPluginConfig.ID, "plugin.type", h.task.CSIPluginConfig.Type)) + defer client.Close() + if err != nil { + return nil, fmt.Errorf("failed to create csi client: %v", err) + } + + info, err := client.PluginInfo() + if err != nil { + return nil, fmt.Errorf("failed to probe plugin: %v", err) + } + mkInfoFn := func(pluginType string) *dynamicplugins.PluginInfo { return &dynamicplugins.PluginInfo{ Type: pluginType, Name: h.task.CSIPluginConfig.ID, - Version: "1.0.0", + Version: info.PluginVersion, ConnectionInfo: &dynamicplugins.PluginConnectionInfo{ SocketPath: socketPath, }, AllocID: h.alloc.ID, Options: map[string]string{ + "Provider": info.Name, // vendor name "MountPoint": h.mountPoint, "ContainerMountPoint": h.task.CSIPluginConfig.MountDir, }, diff --git a/client/pluginmanager/csimanager/fingerprint.go b/client/pluginmanager/csimanager/fingerprint.go index fa60e87f1..b0da9c8fc 100644 --- a/client/pluginmanager/csimanager/fingerprint.go +++ b/client/pluginmanager/csimanager/fingerprint.go @@ -86,6 +86,8 @@ func (p *pluginFingerprinter) buildBasicFingerprint(ctx context.Context) (*struc info := &structs.CSIInfo{ PluginID: p.info.Name, AllocID: p.info.AllocID, + Provider: p.info.Options["Provider"], + ProviderVersion: p.info.Version, Healthy: false, HealthDescription: "initial fingerprint not completed", } diff 
--git a/command/alloc_status.go b/command/alloc_status.go index b8d860257..666e468a3 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -776,8 +776,9 @@ FOUND: } csiVolumesOutput = append(csiVolumesOutput, fmt.Sprintf("%s|%s|%s|%v|%v|%s", - volReq.Name, vol.PluginID, - "n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7248 + volReq.Name, + vol.PluginID, + vol.Provider, vol.Schedulable, volReq.ReadOnly, "n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7007 diff --git a/command/plugin_status_csi.go b/command/plugin_status_csi.go index ef7d28101..36f1284bd 100644 --- a/command/plugin_status_csi.go +++ b/command/plugin_status_csi.go @@ -66,12 +66,12 @@ func (c *PluginStatusCommand) csiFormatPlugins(plugs []*api.CSIPluginListStub) ( return out, nil } - // TODO(langmartin) add Provider https://github.com/hashicorp/nomad/issues/7248 rows := make([]string, len(plugs)+1) - rows[0] = "ID|Controllers Healthy/Expected|Nodes Healthy/Expected" + rows[0] = "ID|Provider|Controllers Healthy/Expected|Nodes Healthy/Expected" for i, p := range plugs { - rows[i+1] = fmt.Sprintf("%s|%d/%d|%d/%d", + rows[i+1] = fmt.Sprintf("%s|%s|%d/%d|%d/%d", limit(p.ID, c.length), + p.Provider, p.ControllersHealthy, p.ControllersExpected, p.NodesHealthy, @@ -92,6 +92,8 @@ func (c *PluginStatusCommand) csiFormatPlugin(plug *api.CSIPlugin) (string, erro output := []string{ fmt.Sprintf("ID|%s", plug.ID), + fmt.Sprintf("Provider|%s", plug.Provider), + fmt.Sprintf("Version|%s", plug.Version), fmt.Sprintf("Controllers Healthy|%d", plug.ControllersHealthy), fmt.Sprintf("Controllers Expected|%d", len(plug.Controllers)), fmt.Sprintf("Nodes Healthy|%d", plug.NodesHealthy), diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go index 27b9dd566..4838c009c 100644 --- a/command/volume_status_csi.go +++ b/command/volume_status_csi.go @@ -90,12 +90,13 @@ func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { return out, nil } - // TODO(langmartin) add Provider https://github.com/hashicorp/nomad/issues/7248 output := []string{ fmt.Sprintf("ID|%s", vol.ID), fmt.Sprintf("Name|%s", vol.Name), fmt.Sprintf("External ID|%s", vol.ExternalID), - + fmt.Sprintf("Plugin ID|%s", vol.PluginID), + fmt.Sprintf("Provider|%s", vol.Provider), + fmt.Sprintf("Version|%s", vol.ProviderVersion), fmt.Sprintf("Schedulable|%t", vol.Schedulable), fmt.Sprintf("Controllers Healthy|%d", vol.ControllersHealthy), fmt.Sprintf("Controllers Expected|%d", vol.ControllersExpected), diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 91aa6a943..3a6bdbc33 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -201,18 +201,25 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol return v.srv.blockingRPC(&opts) } -func (srv *Server) controllerValidateVolume(req *structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume) error { +func (srv *Server) pluginValidateVolume(req *structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume) (*structs.CSIPlugin, error) { state := srv.fsm.State() ws := memdb.NewWatchSet() plugin, err := state.CSIPluginByID(ws, vol.PluginID) if err != nil { - return err + return nil, err } if plugin == nil { - return fmt.Errorf("no CSI plugin named: %s could be found", vol.PluginID) + return nil, fmt.Errorf("no CSI plugin named: %s could be found", vol.PluginID) } + vol.Provider = plugin.Provider + vol.ProviderVersion = plugin.Version + return plugin, nil +} + +func (srv *Server) controllerValidateVolume(req 
*structs.CSIVolumeRegisterRequest, vol *structs.CSIVolume, plugin *structs.CSIPlugin) error { + if !plugin.ControllerRequired { // The plugin does not require a controller, so for now we won't do any // further validation of the volume. @@ -271,8 +278,11 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru if err = vol.Validate(); err != nil { return err } - - if err := v.srv.controllerValidateVolume(args, vol); err != nil { + plugin, err := v.srv.pluginValidateVolume(args, vol) + if err != nil { + return err + } + if err := v.srv.controllerValidateVolume(args, vol, plugin); err != nil { return err } } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index c52653dee..d82bfaa98 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -958,6 +958,8 @@ func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro } else { plug = structs.NewCSIPlugin(info.PluginID, index) plug.ControllerRequired = info.RequiresControllerPlugin + plug.Provider = info.Provider + plug.Version = info.ProviderVersion } plug.AddPlugin(node.ID, info) @@ -1811,6 +1813,8 @@ func (s *StateStore) CSIVolumeDenormalizePlugins(ws memdb.WatchSet, vol *structs return vol, nil } + vol.Provider = plug.Provider + vol.ProviderVersion = plug.Version vol.ControllerRequired = plug.ControllerRequired vol.ControllersHealthy = plug.ControllersHealthy vol.NodesHealthy = plug.NodesHealthy diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 77709b362..4e65ec6a7 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -156,6 +156,8 @@ type CSIVolume struct { // volume has not been marked for garbage collection Schedulable bool PluginID string + Provider string + ProviderVersion string ControllerRequired bool ControllersHealthy int ControllersExpected int @@ -180,6 +182,7 @@ type CSIVolListStub struct { CurrentWriters int Schedulable bool PluginID string + Provider string ControllersHealthy int ControllersExpected int NodesHealthy int @@ -222,6 +225,7 @@ func (v *CSIVolume) Stub() *CSIVolListStub { CurrentWriters: len(v.WriteAllocs), Schedulable: v.Schedulable, PluginID: v.PluginID, + Provider: v.Provider, ControllersHealthy: v.ControllersHealthy, NodesHealthy: v.NodesHealthy, NodesExpected: v.NodesExpected, @@ -466,6 +470,8 @@ type CSIVolumeGetResponse struct { // CSIPlugin collects fingerprint info context for the plugin for clients type CSIPlugin struct { ID string + Provider string // the vendor name from CSI GetPluginInfoResponse + Version string // the vendor verson from CSI GetPluginInfoResponse ControllerRequired bool // Map Node.IDs to fingerprint results, split by type. 
Monolith type plugins have @@ -561,6 +567,7 @@ func (p *CSIPlugin) DeleteNode(nodeID string) { type CSIPluginListStub struct { ID string + Provider string ControllerRequired bool ControllersHealthy int ControllersExpected int @@ -573,6 +580,7 @@ type CSIPluginListStub struct { func (p *CSIPlugin) Stub() *CSIPluginListStub { return &CSIPluginListStub{ ID: p.ID, + Provider: p.Provider, ControllerRequired: p.ControllerRequired, ControllersHealthy: p.ControllersHealthy, ControllersExpected: len(p.Controllers), diff --git a/nomad/structs/node.go b/nomad/structs/node.go index d9d0062fc..b8d4ea5f9 100644 --- a/nomad/structs/node.go +++ b/nomad/structs/node.go @@ -151,6 +151,9 @@ type CSIInfo struct { HealthDescription string UpdateTime time.Time + Provider string // vendor name from CSI GetPluginInfoResponse + ProviderVersion string // vendor version from CSI GetPluginInfoResponse + // RequiresControllerPlugin is set when the CSI Plugin returns the // CONTROLLER_SERVICE capability. When this is true, the volumes should not be // scheduled on this client until a matching controller plugin is available. diff --git a/plugins/csi/client.go b/plugins/csi/client.go index 00d3df9fe..d558b0331 100644 --- a/plugins/csi/client.go +++ b/plugins/csi/client.go @@ -15,6 +15,9 @@ import ( "google.golang.org/grpc" ) +// PluginTypeCSI implements the CSI plugin interface +const PluginTypeCSI = "csi" + type NodeGetInfoResponse struct { NodeID string MaxVolumes int64 @@ -126,15 +129,15 @@ func newGrpcConn(addr string, logger hclog.Logger) (*grpc.ClientConn, error) { // PluginInfo describes the type and version of a plugin as required by the nomad // base.BasePlugin interface. func (c *client) PluginInfo() (*base.PluginInfoResponse, error) { - name, err := c.PluginGetInfo(context.TODO()) + name, version, err := c.PluginGetInfo(context.TODO()) if err != nil { return nil, err } return &base.PluginInfoResponse{ - Type: "csi", - PluginApiVersions: []string{"1.0.0"}, // TODO: fingerprint csi version - PluginVersion: "1.0.0", // TODO: get plugin version from somewhere?! 
+ Type: PluginTypeCSI, // note: this isn't a Nomad go-plugin type + PluginApiVersions: []string{"1.0.0"}, // TODO(tgross): we want to fingerprint spec version, but this isn't included as a field from the plugins + PluginVersion: version, Name: name, }, nil } @@ -172,25 +175,26 @@ func (c *client) PluginProbe(ctx context.Context) (bool, error) { return ready, nil } -func (c *client) PluginGetInfo(ctx context.Context) (string, error) { +func (c *client) PluginGetInfo(ctx context.Context) (string, string, error) { if c == nil { - return "", fmt.Errorf("Client not initialized") + return "", "", fmt.Errorf("Client not initialized") } if c.identityClient == nil { - return "", fmt.Errorf("Client not initialized") + return "", "", fmt.Errorf("Client not initialized") } - req, err := c.identityClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{}) + resp, err := c.identityClient.GetPluginInfo(ctx, &csipbv1.GetPluginInfoRequest{}) if err != nil { - return "", err + return "", "", err } - name := req.GetName() + name := resp.GetName() if name == "" { - return "", fmt.Errorf("PluginGetInfo: plugin returned empty name field") + return "", "", fmt.Errorf("PluginGetInfo: plugin returned empty name field") } + version := resp.GetVendorVersion() - return name, nil + return name, version, nil } func (c *client) PluginGetCapabilities(ctx context.Context) (*PluginCapabilitySet, error) { diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 5d997722e..ff8f6f2ff 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -85,11 +85,12 @@ func TestClient_RPC_PluginProbe(t *testing.T) { func TestClient_RPC_PluginInfo(t *testing.T) { cases := []struct { - Name string - ResponseErr error - InfoResponse *csipbv1.GetPluginInfoResponse - ExpectedResponse string - ExpectedErr error + Name string + ResponseErr error + InfoResponse *csipbv1.GetPluginInfoResponse + ExpectedResponseName string + ExpectedResponseVersion string + ExpectedErr error }{ { Name: "handles underlying grpc errors", @@ -99,16 +100,19 @@ func TestClient_RPC_PluginInfo(t *testing.T) { { Name: "returns an error if we receive an empty `name`", InfoResponse: &csipbv1.GetPluginInfoResponse{ - Name: "", + Name: "", + VendorVersion: "", }, ExpectedErr: fmt.Errorf("PluginGetInfo: plugin returned empty name field"), }, { Name: "returns the name when successfully retrieved and not empty", InfoResponse: &csipbv1.GetPluginInfoResponse{ - Name: "com.hashicorp.storage", + Name: "com.hashicorp.storage", + VendorVersion: "1.0.1", }, - ExpectedResponse: "com.hashicorp.storage", + ExpectedResponseName: "com.hashicorp.storage", + ExpectedResponseVersion: "1.0.1", }, } @@ -120,12 +124,13 @@ func TestClient_RPC_PluginInfo(t *testing.T) { ic.NextErr = c.ResponseErr ic.NextPluginInfo = c.InfoResponse - resp, err := client.PluginGetInfo(context.TODO()) + name, version, err := client.PluginGetInfo(context.TODO()) if c.ExpectedErr != nil { require.Error(t, c.ExpectedErr, err) } - require.Equal(t, c.ExpectedResponse, resp) + require.Equal(t, c.ExpectedResponseName, name) + require.Equal(t, c.ExpectedResponseVersion, version) }) } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 3809f9333..b18ec6f6e 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -28,9 +28,10 @@ type Client struct { NextPluginProbeErr error PluginProbeCallCount int64 - NextPluginGetInfoResponse string - NextPluginGetInfoErr error - PluginGetInfoCallCount int64 + NextPluginGetInfoNameResponse string + 
NextPluginGetInfoVersionResponse string + NextPluginGetInfoErr error + PluginGetInfoCallCount int64 NextPluginGetCapabilitiesResponse *csi.PluginCapabilitySet NextPluginGetCapabilitiesErr error @@ -106,13 +107,13 @@ func (c *Client) PluginProbe(ctx context.Context) (bool, error) { // PluginGetInfo is used to return semantic data about the plugin. // Response: // - string: name, the name of the plugin in domain notation format. -func (c *Client) PluginGetInfo(ctx context.Context) (string, error) { +func (c *Client) PluginGetInfo(ctx context.Context) (string, string, error) { c.Mu.Lock() defer c.Mu.Unlock() c.PluginGetInfoCallCount++ - return c.NextPluginGetInfoResponse, c.NextPluginGetInfoErr + return c.NextPluginGetInfoNameResponse, c.NextPluginGetInfoVersionResponse, c.NextPluginGetInfoErr } // PluginGetCapabilities is used to return the available capabilities from the diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 9eac22e25..97e463dee 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -23,7 +23,8 @@ type CSIPlugin interface { // PluginGetInfo is used to return semantic data about the plugin. // Response: // - string: name, the name of the plugin in domain notation format. - PluginGetInfo(ctx context.Context) (string, error) + // - string: version, the vendor version of the plugin + PluginGetInfo(ctx context.Context) (string, string, error) // PluginGetCapabilities is used to return the available capabilities from the // identity service. This currently only looks for the CONTROLLER_SERVICE and From b57df162ce7f024fcae1a2ef9792fe8347d5d29b Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 9 Mar 2020 11:24:14 -0400 Subject: [PATCH 108/126] csi: ensure GET for plugin is idempotent (#7298) We denormalize the `CSIPlugin` struct when we query it from the state store by getting the current set of allocations that provide the plugin. But unless we copy the plugin, this denormalization gets synced back to the state store and each time we query we'll add another copy of the current allocations. --- nomad/csi_endpoint.go | 2 +- nomad/csi_endpoint_test.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 3a6bdbc33..1ae89189c 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -459,7 +459,7 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu } if plug != nil { - plug, err = state.CSIPluginDenormalize(ws, plug) + plug, err = state.CSIPluginDenormalize(ws, plug.Copy()) } if err != nil { return err diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index dbd33ad9a..f918ef7d9 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -517,6 +517,11 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(resp3.Plugins)) + // ensure that plugin->alloc denormalization does COW correctly + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + // Deregistration works deleteNodes() From b04d23dae067751c1362f6c175b9b6fdaf9b7329 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 9 Mar 2020 16:58:12 -0400 Subject: [PATCH 109/126] csi: ensure volume query is idempotent (#7303) We denormalize the `CSIVolume` struct when we query it from the state store by getting the plugin and its health. 
But unless we copy the volume, this denormalization gets synced back to the state store without passing through the fsm (which is invalid). --- nomad/csi_endpoint.go | 2 +- nomad/state/state_store.go | 12 ++++++---- nomad/state/state_store_test.go | 39 ++++++++++++++++++++++++++++----- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 1ae89189c..ace05fabb 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -134,7 +134,7 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV } vol := raw.(*structs.CSIVolume) - vol, err := state.CSIVolumeDenormalizePlugins(ws, vol) + vol, err := state.CSIVolumeDenormalizePlugins(ws, vol.Copy()) if err != nil { return err } diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index d82bfaa98..c8409d402 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1655,7 +1655,8 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum return nil } -// CSIVolumeByID is used to lookup a single volume. Its plugins are denormalized to provide accurate Health +// CSIVolumeByID is used to lookup a single volume. Returns a copy of the volume +// because its plugins are denormalized to provide accurate Health. func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVolume, error) { txn := s.db.Txn(false) @@ -1670,7 +1671,7 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVo } vol := obj.(*structs.CSIVolume) - return s.CSIVolumeDenormalizePlugins(ws, vol) + return s.CSIVolumeDenormalizePlugins(ws, vol.Copy()) } // CSIVolumes looks up csi_volumes by pluginID @@ -1741,8 +1742,11 @@ func (s *StateStore) CSIVolumeClaim(index uint64, id string, alloc *structs.Allo return fmt.Errorf("volume row conversion error") } - volume := orig.Copy() - + ws := memdb.NewWatchSet() + volume, err := s.CSIVolumeDenormalizePlugins(ws, orig.Copy()) + if err != nil { + return err + } if !volume.Claim(claim, alloc) { return fmt.Errorf("volume max claim reached") } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 5af1ad81b..816697722 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2829,9 +2829,34 @@ func TestStateStore_RestoreJobSummary(t *testing.T) { // TestStateStore_CSIVolume checks register, list and deregister for csi_volumes func TestStateStore_CSIVolume(t *testing.T) { state := testStateStore(t) + index := uint64(1000) + + // Create a node running a healthy instance of the plugin + node := mock.Node() + pluginID := "minnie" + allocID := uuid.Generate() + node.CSINodePlugins = map[string]*structs.CSIInfo{ + pluginID: { + PluginID: pluginID, + AllocID: allocID, + Healthy: true, + HealthDescription: "healthy", + RequiresControllerPlugin: false, + RequiresTopologies: false, + NodeInfo: &structs.CSINodeInfo{ + ID: node.ID, + MaxVolumes: 64, + RequiresNodeStageVolume: true, + }, + }, + } + + index++ + state.UpsertNode(index, node) + + defer state.DeleteNode(9999, []string{pluginID}) id0, id1 := uuid.Generate(), uuid.Generate() - index := uint64(1000) v0 := structs.NewCSIVolume("foo", index) v0.ID = id0 @@ -2850,7 +2875,8 @@ func TestStateStore_CSIVolume(t *testing.T) { v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem - err := state.CSIVolumeRegister(0, []*structs.CSIVolume{v0, v1}) + index++ + err := 
state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) ws := memdb.NewWatchSet() @@ -2878,7 +2904,8 @@ func TestStateStore_CSIVolume(t *testing.T) { vs = slurp(iter) require.Equal(t, 1, len(vs)) - err = state.CSIVolumeDeregister(1, []string{ + index++ + err = state.CSIVolumeDeregister(index, []string{ id1, }) require.NoError(t, err) @@ -2902,9 +2929,11 @@ func TestStateStore_CSIVolume(t *testing.T) { w := structs.CSIVolumeClaimWrite u := structs.CSIVolumeClaimRelease - err = state.CSIVolumeClaim(2, id0, a0, r) + index++ + err = state.CSIVolumeClaim(index, id0, a0, r) require.NoError(t, err) - err = state.CSIVolumeClaim(2, id0, a1, w) + index++ + err = state.CSIVolumeClaim(index, id0, a1, w) require.NoError(t, err) ws = memdb.NewWatchSet() From de25fc6cf486410927ec88f7f35386b2ebadd204 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 10 Mar 2020 10:22:42 -0400 Subject: [PATCH 110/126] csi: csi-hostpath plugin unimplemented error on controller publish (#7299) * client/allocrunner/csi_hook: tag errors * nomad/client_csi_endpoint: tag errors * nomad/client_rpc: remove an unnecessary error tag * nomad/state/state_store: ControllerRequired fix intent We use ControllerRequired to indicate that a volume should use the publish/unpublish workflow, rather than that it has a controller. We need to check both RequiresControllerPlugin and SupportsAttachDetach from the fingerprint to check that. * nomad/csi_endpoint: tag errors * nomad/csi_endpoint_test: longer error messages, mock fingerprints --- client/allocrunner/csi_hook.go | 2 +- nomad/client_csi_endpoint.go | 13 +++++++++++-- nomad/client_rpc.go | 2 +- nomad/csi_endpoint.go | 4 ++-- nomad/csi_endpoint_test.go | 29 ++++++++++++++++------------- nomad/state/state_store.go | 1 - nomad/structs/csi.go | 3 +++ 7 files changed, 34 insertions(+), 20 deletions(-) diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index 8e6c05397..02b0e4708 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -34,7 +34,7 @@ func (c *csiHook) Prerun() error { ctx := context.TODO() volumes, err := c.claimVolumesFromAlloc() if err != nil { - return err + return fmt.Errorf("claim volumes: %v", err) } mounts := make(map[string]*csimanager.MountInfo, len(volumes)) diff --git a/nomad/client_csi_endpoint.go b/nomad/client_csi_endpoint.go index 061e6493d..36c230ea7 100644 --- a/nomad/client_csi_endpoint.go +++ b/nomad/client_csi_endpoint.go @@ -2,6 +2,7 @@ package nomad import ( "errors" + "fmt" "time" metrics "github.com/armon/go-metrics" @@ -42,7 +43,11 @@ func (a *ClientCSIController) AttachVolume(args *cstructs.ClientCSIControllerAtt } // Make the RPC - return NodeRpc(state.Session, "CSIController.AttachVolume", args, reply) + err = NodeRpc(state.Session, "CSIController.AttachVolume", args, reply) + if err != nil { + return fmt.Errorf("attach volume: %v", err) + } + return nil } func (a *ClientCSIController) ValidateVolume(args *cstructs.ClientCSIControllerValidateVolumeRequest, reply *cstructs.ClientCSIControllerValidateVolumeResponse) error { @@ -71,5 +76,9 @@ func (a *ClientCSIController) ValidateVolume(args *cstructs.ClientCSIControllerV } // Make the RPC - return NodeRpc(state.Session, "CSIController.ValidateVolume", args, reply) + err = NodeRpc(state.Session, "CSIController.ValidateVolume", args, reply) + if err != nil { + return fmt.Errorf("validate volume: %v", err) + } + return nil } diff --git a/nomad/client_rpc.go b/nomad/client_rpc.go index 0c5d611e5..b17c3d39f 100644 --- 
a/nomad/client_rpc.go +++ b/nomad/client_rpc.go @@ -232,7 +232,7 @@ func NodeRpc(session *yamux.Session, method string, args, reply interface{}) err // Make the RPC err = msgpackrpc.CallWithCodec(pool.NewClientCodec(stream), method, args, reply) if err != nil { - return fmt.Errorf("rpc call: %v", err) + return err } return nil diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index ace05fabb..2e6226a9e 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -357,7 +357,7 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS // adds a Volume and PublishContext from the controller (if any) to the reply err = v.srv.controllerPublishVolume(args, reply) if err != nil { - return err + return fmt.Errorf("controllerPublish: %v", err) } resp, index, err := v.srv.raftApply(structs.CSIVolumeClaimRequestType, args) @@ -540,7 +540,7 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r err = srv.RPC(method, cReq, cResp) if err != nil { - return err + return fmt.Errorf("attach volume: %v", err) } resp.PublishContext = cResp.PublishContext return nil diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index f918ef7d9..0bbd5684b 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -220,7 +220,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { } claimResp := &structs.CSIVolumeClaimResponse{} err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) - require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id0), + require.EqualError(t, err, fmt.Sprintf("controllerPublish: volume not found: %s", id0), "expected 'volume not found' error because volume hasn't yet been created") // Create a client node, plugin, alloc, and volume @@ -329,19 +329,20 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { node := mock.Node() node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version node.CSIControllerPlugins = map[string]*structs.CSIInfo{ - "minnie": {PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{}, - NodeInfo: &structs.CSINodeInfo{}, + "minnie": { + PluginID: "minnie", + Healthy: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsAttachDetach: true, + }, RequiresControllerPlugin: true, }, } node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": {PluginID: "minnie", - Healthy: true, - ControllerInfo: &structs.CSIControllerInfo{}, - NodeInfo: &structs.CSINodeInfo{}, - RequiresControllerPlugin: true, + "minnie": { + PluginID: "minnie", + Healthy: true, + NodeInfo: &structs.CSINodeInfo{}, }, } err := state.UpsertNode(1002, node) @@ -376,7 +377,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { claimResp := &structs.CSIVolumeClaimResponse{} err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) // Because the node is not registered - require.EqualError(t, err, "No path to node") + require.EqualError(t, err, "controllerPublish: attach volume: No path to node") } func TestCSIVolumeEndpoint_List(t *testing.T) { @@ -544,8 +545,10 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { // Create a client node with a plugin node := mock.Node() node.CSINodePlugins = map[string]*structs.CSIInfo{ - "minnie": {PluginID: "minnie", Healthy: true, RequiresControllerPlugin: true}, - "adam": {PluginID: "adam", Healthy: true}, + "minnie": {PluginID: "minnie", Healthy: true, RequiresControllerPlugin: true, + ControllerInfo: 
&structs.CSIControllerInfo{SupportsAttachDetach: true}, + }, + "adam": {PluginID: "adam", Healthy: true}, } err := state.UpsertNode(3, node) require.NoError(t, err) diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index c8409d402..0a97c1ec5 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -957,7 +957,6 @@ func upsertNodeCSIPlugins(txn *memdb.Txn, node *structs.Node, index uint64) erro plug = raw.(*structs.CSIPlugin).Copy() } else { plug = structs.NewCSIPlugin(info.PluginID, index) - plug.ControllerRequired = info.RequiresControllerPlugin plug.Provider = info.Provider plug.Version = info.ProviderVersion } diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 4e65ec6a7..29c96fc08 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -527,6 +527,9 @@ func (p *CSIPlugin) Copy() *CSIPlugin { // transaction func (p *CSIPlugin) AddPlugin(nodeID string, info *CSIInfo) { if info.ControllerInfo != nil { + p.ControllerRequired = info.RequiresControllerPlugin && + info.ControllerInfo.SupportsAttachDetach + prev, ok := p.Controllers[nodeID] if ok && prev.Healthy { p.ControllersHealthy -= 1 From 80619137abb131242c27c6cc0e2214e435d7ddfd Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 11 Mar 2020 12:47:14 -0400 Subject: [PATCH 111/126] csi: volumes listed in `nomad node status` (#7318) * api/allocations: GetTaskGroup finds the taskgroup struct * command/node_status: display CSI volume names * nomad/state/state_store: new CSIVolumesByNodeID * nomad/state/iterator: new SliceIterator type implements memdb.ResultIterator * nomad/csi_endpoint: deal with a slice of volumes * nomad/state/state_store: CSIVolumesByNodeID return a SliceIterator * nomad/structs/csi: CSIVolumeListRequest takes a NodeID * nomad/csi_endpoint: use the return iterator * command/agent/csi_endpoint: parse query params for CSIVolumes.List * api/nodes: new CSIVolumes to list volumes by node * command/node_status: use the new list endpoint to print volumes * nomad/state/state_store: error messages consider the operator * command/node_status: include the Provider --- api/allocations.go | 19 +++--- api/nodes.go | 10 ++++ command/agent/csi_endpoint.go | 8 +++ command/node_status.go | 101 +++++++++++++++++++++++++++----- nomad/csi_endpoint.go | 9 ++- nomad/state/iterator.go | 30 ++++++++++ nomad/state/state_store.go | 49 ++++++++++++++++ nomad/state/state_store_test.go | 46 +++++++++++---- nomad/structs/csi.go | 1 + 9 files changed, 238 insertions(+), 35 deletions(-) create mode 100644 nomad/state/iterator.go diff --git a/api/allocations.go b/api/allocations.go index cc2be5ea7..3552390a7 100644 --- a/api/allocations.go +++ b/api/allocations.go @@ -507,18 +507,23 @@ func (a AllocIndexSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Allocation) GetTaskGroup() *TaskGroup { + for _, tg := range a.Job.TaskGroups { + if *tg.Name == a.TaskGroup { + return tg + } + } + return nil +} + // RescheduleInfo is used to calculate remaining reschedule attempts // according to the given time and the task groups reschedule policy func (a Allocation) RescheduleInfo(t time.Time) (int, int) { - var reschedulePolicy *ReschedulePolicy - for _, tg := range a.Job.TaskGroups { - if *tg.Name == a.TaskGroup { - reschedulePolicy = tg.ReschedulePolicy - } - } - if reschedulePolicy == nil { + tg := a.GetTaskGroup() + if tg == nil || tg.ReschedulePolicy == nil { return 0, 0 } + reschedulePolicy := tg.ReschedulePolicy availableAttempts := *reschedulePolicy.Attempts interval := 
*reschedulePolicy.Interval attempted := 0 diff --git a/api/nodes.go b/api/nodes.go index a942c4545..f567a9b51 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -392,6 +392,16 @@ func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *Que return resp, qm, nil } +func (n *Nodes) CSIVolumes(nodeID string, q *QueryOptions) ([]*CSIVolumeListStub, error) { + var resp []*CSIVolumeListStub + path := fmt.Sprintf("/v1/csi/volumes?node_id=%s", nodeID) + if _, err := n.client.query(path, &resp, q); err != nil { + return nil, err + } + + return resp, nil +} + // ForceEvaluate is used to force-evaluate an existing node. func (n *Nodes) ForceEvaluate(nodeID string, q *WriteOptions) (string, *WriteMeta, error) { var resp nodeEvalResponse diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index c6daad16f..6f8d6e240 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -18,6 +18,14 @@ func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Reque return nil, nil } + query := req.URL.Query() + if plugin, ok := query["plugin_id"]; ok { + args.PluginID = plugin[0] + } + if node, ok := query["node_id"]; ok { + args.NodeID = node[0] + } + var out structs.CSIVolumeListResponse if err := s.agent.RPC("CSIVolume.List", &args, &out); err != nil { return nil, err diff --git a/command/node_status.go b/command/node_status.go index 3f8424e76..307d55028 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -317,6 +317,22 @@ func nodeCSINodeNames(n *api.Node) []string { return names } +func nodeCSIVolumeNames(n *api.Node, allocs []*api.Allocation) []string { + var names []string + for _, alloc := range allocs { + tg := alloc.GetTaskGroup() + if tg == nil || len(tg.Volumes) == 0 { + continue + } + + for _, v := range tg.Volumes { + names = append(names, v.Name) + } + } + sort.Strings(names) + return names +} + func nodeVolumeNames(n *api.Node) []string { var volumes []string for name := range n.HostVolumes { @@ -349,6 +365,20 @@ func formatDrain(n *api.Node) string { } func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { + // Make one API call for allocations + nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err)) + return 1 + } + + var runningAllocs []*api.Allocation + for _, alloc := range nodeAllocs { + if alloc.ClientStatus == "running" { + runningAllocs = append(runningAllocs, alloc) + } + } + // Format the header output basic := []string{ fmt.Sprintf("ID|%s", node.ID), @@ -364,11 +394,12 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { if c.short { basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ","))) + basic = append(basic, fmt.Sprintf("CSI Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ","))) basic = append(basic, fmt.Sprintf("Drivers|%s", strings.Join(nodeDrivers(node), ","))) c.Ui.Output(c.Colorize().Color(formatKV(basic))) // Output alloc info - if err := c.outputAllocInfo(client, node); err != nil { + if err := c.outputAllocInfo(node, nodeAllocs); err != nil { c.Ui.Error(fmt.Sprintf("%s", err)) return 1 } @@ -391,7 +422,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { // driver info in the basic output if !c.verbose { basic = append(basic, fmt.Sprintf("Host Volumes|%s", strings.Join(nodeVolumeNames(node), ","))) - + basic = append(basic, fmt.Sprintf("CSI 
Volumes|%s", strings.Join(nodeCSIVolumeNames(node, runningAllocs), ","))) driverStatus := fmt.Sprintf("Driver Status| %s", c.outputTruncatedNodeDriverInfo(node)) basic = append(basic, driverStatus) } @@ -402,6 +433,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { // If we're running in verbose mode, include full host volume and driver info if c.verbose { c.outputNodeVolumeInfo(node) + c.outputNodeCSIVolumeInfo(client, node, runningAllocs) c.outputNodeDriverInfo(node) } @@ -409,12 +441,6 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { c.outputNodeStatusEvents(node) // Get list of running allocations on the node - runningAllocs, err := getRunningAllocs(client, node.ID) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying node for running allocations: %s", err)) - return 1 - } - allocatedResources := getAllocatedResources(client, runningAllocs, node) c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]")) c.Ui.Output(formatList(allocatedResources)) @@ -452,7 +478,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { } } - if err := c.outputAllocInfo(client, node); err != nil { + if err := c.outputAllocInfo(node, nodeAllocs); err != nil { c.Ui.Error(fmt.Sprintf("%s", err)) return 1 } @@ -460,12 +486,7 @@ func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int { return 0 } -func (c *NodeStatusCommand) outputAllocInfo(client *api.Client, node *api.Node) error { - nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil) - if err != nil { - return fmt.Errorf("Error querying node allocations: %s", err) - } - +func (c *NodeStatusCommand) outputAllocInfo(node *api.Node, nodeAllocs []*api.Allocation) error { c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]")) c.Ui.Output(formatAllocList(nodeAllocs, c.verbose, c.length)) @@ -515,6 +536,56 @@ func (c *NodeStatusCommand) outputNodeVolumeInfo(node *api.Node) { c.Ui.Output(formatList(output)) } +func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *api.Node, runningAllocs []*api.Allocation) { + c.Ui.Output(c.Colorize().Color("\n[bold]CSI Volumes")) + + // Duplicate nodeCSIVolumeNames to sort by name but also index volume names to ids + var names []string + volNames := map[string]string{} + for _, alloc := range runningAllocs { + tg := alloc.GetTaskGroup() + if tg == nil || len(tg.Volumes) == 0 { + continue + } + + for _, v := range tg.Volumes { + names = append(names, v.Name) + volNames[v.Source] = v.Name + } + } + if len(names) == 0 { + return + } + sort.Strings(names) + + // Fetch the volume objects with current status + // Ignore an error, all we're going to do is omit the volumes + volumes := map[string]*api.CSIVolumeListStub{} + vs, _ := client.Nodes().CSIVolumes(node.ID, nil) + for _, v := range vs { + n := volNames[v.ID] + volumes[n] = v + } + + // Output the volumes in name order + output := make([]string, 0, len(names)+1) + output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode") + for _, name := range names { + v := volumes[name] + output = append(output, fmt.Sprintf( + "%s|%s|%s|%t|%s|%s", + v.ID, + name, + v.PluginID, + v.Schedulable, + v.Provider, + v.AccessMode, + )) + } + + c.Ui.Output(formatList(output)) +} + func (c *NodeStatusCommand) outputNodeDriverInfo(node *api.Node) { c.Ui.Output(c.Colorize().Color("\n[bold]Drivers")) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 2e6226a9e..9d8e44a90 100644 --- 
a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -113,7 +113,9 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV var err error var iter memdb.ResultIterator - if args.PluginID != "" { + if args.NodeID != "" { + iter, err = state.CSIVolumesByNodeID(ws, args.NodeID) + } else if args.PluginID != "" { iter, err = state.CSIVolumesByPluginID(ws, args.PluginID) } else { iter, err = state.CSIVolumes(ws) @@ -144,6 +146,11 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV continue } + // Filter (possibly again) on PluginID to handle passing both NodeID and PluginID + if args.PluginID != "" && args.PluginID != vol.PluginID { + continue + } + // Cache ACL checks QUESTION: are they expensive? allowed, ok := cache[vol.Namespace] if !ok { diff --git a/nomad/state/iterator.go b/nomad/state/iterator.go new file mode 100644 index 000000000..2c0efb9ad --- /dev/null +++ b/nomad/state/iterator.go @@ -0,0 +1,30 @@ +package state + +type SliceIterator struct { + data []interface{} + idx int +} + +func NewSliceIterator() *SliceIterator { + return &SliceIterator{ + data: []interface{}{}, + idx: 0, + } +} + +func (i *SliceIterator) Add(datum interface{}) { + i.data = append(i.data, datum) +} + +func (i *SliceIterator) Next() interface{} { + if i.idx == len(i.data) { + return nil + } + idx := i.idx + i.idx += 1 + return i.data[idx] +} + +func (i *SliceIterator) WatchCh() <-chan struct{} { + return nil +} diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index 0a97c1ec5..d7eb45f29 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1710,6 +1710,55 @@ func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID return wrap, nil } +// CSIVolumesByNodeID looks up CSIVolumes in use on a node +func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, nodeID string) (memdb.ResultIterator, error) { + allocs, err := s.AllocsByNode(ws, nodeID) + if err != nil { + return nil, fmt.Errorf("alloc lookup failed: %v", err) + } + snap, err := s.Snapshot() + if err != nil { + return nil, fmt.Errorf("alloc lookup failed: %v", err) + } + + allocs, err = snap.DenormalizeAllocationSlice(allocs) + if err != nil { + return nil, fmt.Errorf("alloc lookup failed: %v", err) + } + + // Find volume ids for CSI volumes in running allocs, or allocs that we desire to run + ids := map[string]struct{}{} + for _, a := range allocs { + tg := a.Job.LookupTaskGroup(a.TaskGroup) + + if !(a.DesiredStatus == structs.AllocDesiredStatusRun || + a.ClientStatus == structs.AllocClientStatusRunning) || + len(tg.Volumes) == 0 { + continue + } + + for _, v := range tg.Volumes { + if v.Type != structs.VolumeTypeCSI { + continue + } + ids[v.Source] = struct{}{} + } + } + + // Lookup the raw CSIVolumes to match the other list interfaces + iter := NewSliceIterator() + txn := s.db.Txn(false) + for id := range ids { + raw, err := txn.First("csi_volumes", "id", id) + if err != nil { + return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) + } + iter.Add(raw) + } + + return iter, nil +} + // CSIVolumes looks up the entire csi_volumes table func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) { txn := s.db.Txn(false) diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index 816697722..d498a2498 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2831,14 +2831,28 @@ func TestStateStore_CSIVolume(t *testing.T) { state := testStateStore(t) 
index := uint64(1000) + // Volume IDs + vol0, vol1 := uuid.Generate(), uuid.Generate() + // Create a node running a healthy instance of the plugin node := mock.Node() pluginID := "minnie" - allocID := uuid.Generate() + alloc := mock.Alloc() + alloc.DesiredStatus = "run" + alloc.ClientStatus = "running" + alloc.NodeID = node.ID + alloc.Job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + "foo": { + Name: "foo", + Source: vol0, + Type: "csi", + }, + } + node.CSINodePlugins = map[string]*structs.CSIInfo{ pluginID: { PluginID: pluginID, - AllocID: allocID, + AllocID: alloc.ID, Healthy: true, HealthDescription: "healthy", RequiresControllerPlugin: false, @@ -2852,14 +2866,16 @@ func TestStateStore_CSIVolume(t *testing.T) { } index++ - state.UpsertNode(index, node) - + err := state.UpsertNode(index, node) + require.NoError(t, err) defer state.DeleteNode(9999, []string{pluginID}) - id0, id1 := uuid.Generate(), uuid.Generate() + index++ + err = state.UpsertAllocs(index, []*structs.Allocation{alloc}) + require.NoError(t, err) v0 := structs.NewCSIVolume("foo", index) - v0.ID = id0 + v0.ID = vol0 v0.Namespace = "default" v0.PluginID = "minnie" v0.Schedulable = true @@ -2868,7 +2884,7 @@ func TestStateStore_CSIVolume(t *testing.T) { index++ v1 := structs.NewCSIVolume("foo", index) - v1.ID = id1 + v1.ID = vol1 v1.Namespace = "default" v1.PluginID = "adam" v1.Schedulable = true @@ -2876,7 +2892,7 @@ func TestStateStore_CSIVolume(t *testing.T) { v1.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem index++ - err := state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1}) + err = state.CSIVolumeRegister(index, []*structs.CSIVolume{v0, v1}) require.NoError(t, err) ws := memdb.NewWatchSet() @@ -2904,9 +2920,15 @@ func TestStateStore_CSIVolume(t *testing.T) { vs = slurp(iter) require.Equal(t, 1, len(vs)) + ws = memdb.NewWatchSet() + iter, err = state.CSIVolumesByNodeID(ws, node.ID) + require.NoError(t, err) + vs = slurp(iter) + require.Equal(t, 1, len(vs)) + index++ err = state.CSIVolumeDeregister(index, []string{ - id1, + vol1, }) require.NoError(t, err) @@ -2930,10 +2952,10 @@ func TestStateStore_CSIVolume(t *testing.T) { u := structs.CSIVolumeClaimRelease index++ - err = state.CSIVolumeClaim(index, id0, a0, r) + err = state.CSIVolumeClaim(index, vol0, a0, r) require.NoError(t, err) index++ - err = state.CSIVolumeClaim(index, id0, a1, w) + err = state.CSIVolumeClaim(index, vol0, a1, w) require.NoError(t, err) ws = memdb.NewWatchSet() @@ -2942,7 +2964,7 @@ func TestStateStore_CSIVolume(t *testing.T) { vs = slurp(iter) require.False(t, vs[0].CanWrite()) - err = state.CSIVolumeClaim(2, id0, a0, u) + err = state.CSIVolumeClaim(2, vol0, a0, u) require.NoError(t, err) ws = memdb.NewWatchSet() iter, err = state.CSIVolumesByPluginID(ws, "minnie") diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 29c96fc08..f1ad4e9fa 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -449,6 +449,7 @@ type CSIVolumeClaimResponse struct { type CSIVolumeListRequest struct { PluginID string + NodeID string QueryOptions } From 99841222ed990c29c719f9e46f2a78b0d65cdf97 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 12 Mar 2020 14:20:40 -0400 Subject: [PATCH 112/126] csi: change the API paths to match CLI command layout (#7325) * command/agent/csi_endpoint: support type filter in volumes & plugins * command/agent/http: use /v1/volume/csi & /v1/plugin/csi * api/csi: use /v1/volume/csi & /v1/plugin/csi * api/nodes: use /v1/volume/csi & /v1/plugin/csi * api/nodes: not /volumes/csi, just 
/volumes * command/agent/csi_endpoint: fix ot parameter parsing --- api/csi.go | 12 ++++++------ api/nodes.go | 2 +- command/agent/csi_endpoint.go | 29 ++++++++++++++++++++++++++--- command/agent/http.go | 8 ++++---- 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/api/csi.go b/api/csi.go index a97b797e1..df1c372d3 100644 --- a/api/csi.go +++ b/api/csi.go @@ -18,7 +18,7 @@ func (c *Client) CSIVolumes() *CSIVolumes { // List returns all CSI volumes func (v *CSIVolumes) List(q *QueryOptions) ([]*CSIVolumeListStub, *QueryMeta, error) { var resp []*CSIVolumeListStub - qm, err := v.client.query("/v1/csi/volumes", &resp, q) + qm, err := v.client.query("/v1/volumes?type=csi", &resp, q) if err != nil { return nil, nil, err } @@ -34,7 +34,7 @@ func (v *CSIVolumes) PluginList(pluginID string) ([]*CSIVolumeListStub, *QueryMe // Info is used to retrieve a single CSIVolume func (v *CSIVolumes) Info(id string, q *QueryOptions) (*CSIVolume, *QueryMeta, error) { var resp CSIVolume - qm, err := v.client.query("/v1/csi/volume/"+id, &resp, q) + qm, err := v.client.query("/v1/volume/csi/"+id, &resp, q) if err != nil { return nil, nil, err } @@ -49,12 +49,12 @@ func (v *CSIVolumes) Register(vol *CSIVolume, w *WriteOptions) (*WriteMeta, erro req := CSIVolumeRegisterRequest{ Volumes: []*CSIVolume{vol}, } - meta, err := v.client.write("/v1/csi/volume/"+vol.ID, req, nil, w) + meta, err := v.client.write("/v1/volume/csi/"+vol.ID, req, nil, w) return meta, err } func (v *CSIVolumes) Deregister(id string, w *WriteOptions) error { - _, err := v.client.delete("/v1/csi/volume/"+id, nil, w) + _, err := v.client.delete("/v1/volume/csi/"+id, nil, w) return err } @@ -229,7 +229,7 @@ func (c *Client) CSIPlugins() *CSIPlugins { // List returns all CSI plugins func (v *CSIPlugins) List(q *QueryOptions) ([]*CSIPluginListStub, *QueryMeta, error) { var resp []*CSIPluginListStub - qm, err := v.client.query("/v1/csi/plugins", &resp, q) + qm, err := v.client.query("/v1/plugins?type=csi", &resp, q) if err != nil { return nil, nil, err } @@ -240,7 +240,7 @@ func (v *CSIPlugins) List(q *QueryOptions) ([]*CSIPluginListStub, *QueryMeta, er // Info is used to retrieve a single CSI Plugin Job func (v *CSIPlugins) Info(id string, q *QueryOptions) (*CSIPlugin, *QueryMeta, error) { var resp *CSIPlugin - qm, err := v.client.query("/v1/csi/plugin/"+id, &resp, q) + qm, err := v.client.query("/v1/plugin/csi/"+id, &resp, q) if err != nil { return nil, nil, err } diff --git a/api/nodes.go b/api/nodes.go index f567a9b51..8ec6f8d0a 100644 --- a/api/nodes.go +++ b/api/nodes.go @@ -394,7 +394,7 @@ func (n *Nodes) Allocations(nodeID string, q *QueryOptions) ([]*Allocation, *Que func (n *Nodes) CSIVolumes(nodeID string, q *QueryOptions) ([]*CSIVolumeListStub, error) { var resp []*CSIVolumeListStub - path := fmt.Sprintf("/v1/csi/volumes?node_id=%s", nodeID) + path := fmt.Sprintf("/v1/volumes?type=csi&node_id=%s", nodeID) if _, err := n.client.query(path, &resp, q); err != nil { return nil, err } diff --git a/command/agent/csi_endpoint.go b/command/agent/csi_endpoint.go index 6f8d6e240..28db3cc8b 100644 --- a/command/agent/csi_endpoint.go +++ b/command/agent/csi_endpoint.go @@ -7,18 +7,30 @@ import ( "github.com/hashicorp/nomad/nomad/structs" ) +const errRequiresType = "Missing required parameter type" + func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { if req.Method != "GET" { return nil, CodedError(405, ErrInvalidMethod) } + // Type filters volume lists to a specific type. 
When support for non-CSI volumes is + // introduced, we'll need to dispatch here + query := req.URL.Query() + qtype, ok := query["type"] + if !ok { + return nil, CodedError(400, errRequiresType) + } + if qtype[0] != "csi" { + return nil, nil + } + args := structs.CSIVolumeListRequest{} if s.parse(resp, req, &args.Region, &args.QueryOptions) { return nil, nil } - query := req.URL.Query() if plugin, ok := query["plugin_id"]; ok { args.PluginID = plugin[0] } @@ -38,7 +50,7 @@ func (s *HTTPServer) CSIVolumesRequest(resp http.ResponseWriter, req *http.Reque // CSIVolumeSpecificRequest dispatches GET and PUT func (s *HTTPServer) CSIVolumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Tokenize the suffix of the path to get the volume id - reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/csi/volume/") + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/volume/csi/") tokens := strings.Split(reqSuffix, "/") if len(tokens) > 2 || len(tokens) < 1 { return nil, CodedError(404, resourceNotFoundErr) @@ -129,6 +141,17 @@ func (s *HTTPServer) CSIPluginsRequest(resp http.ResponseWriter, req *http.Reque return nil, CodedError(405, ErrInvalidMethod) } + // Type filters plugin lists to a specific type. When support for non-CSI plugins is + // introduced, we'll need to dispatch here + query := req.URL.Query() + qtype, ok := query["type"] + if !ok { + return nil, CodedError(400, errRequiresType) + } + if qtype[0] != "csi" { + return nil, nil + } + args := structs.CSIPluginListRequest{} if s.parse(resp, req, &args.Region, &args.QueryOptions) { @@ -151,7 +174,7 @@ func (s *HTTPServer) CSIPluginSpecificRequest(resp http.ResponseWriter, req *htt } // Tokenize the suffix of the path to get the plugin id - reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/csi/plugin/") + reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/plugin/csi/") tokens := strings.Split(reqSuffix, "/") if len(tokens) > 2 || len(tokens) < 1 { return nil, CodedError(404, resourceNotFoundErr) diff --git a/command/agent/http.go b/command/agent/http.go index dcd270213..ca4f3d137 100644 --- a/command/agent/http.go +++ b/command/agent/http.go @@ -253,10 +253,10 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) { s.mux.HandleFunc("/v1/deployments", s.wrap(s.DeploymentsRequest)) s.mux.HandleFunc("/v1/deployment/", s.wrap(s.DeploymentSpecificRequest)) - s.mux.HandleFunc("/v1/csi/volumes", s.wrap(s.CSIVolumesRequest)) - s.mux.HandleFunc("/v1/csi/volume/", s.wrap(s.CSIVolumeSpecificRequest)) - s.mux.HandleFunc("/v1/csi/plugins", s.wrap(s.CSIPluginsRequest)) - s.mux.HandleFunc("/v1/csi/plugin/", s.wrap(s.CSIPluginSpecificRequest)) + s.mux.HandleFunc("/v1/volumes", s.wrap(s.CSIVolumesRequest)) + s.mux.HandleFunc("/v1/volume/csi/", s.wrap(s.CSIVolumeSpecificRequest)) + s.mux.HandleFunc("/v1/plugins", s.wrap(s.CSIPluginsRequest)) + s.mux.HandleFunc("/v1/plugin/csi/", s.wrap(s.CSIPluginSpecificRequest)) s.mux.HandleFunc("/v1/acl/policies", s.wrap(s.ACLPoliciesRequest)) s.mux.HandleFunc("/v1/acl/policy/", s.wrap(s.ACLPolicySpecificRequest)) From 1cf7ef44ed4476ca29ec58c9b4d632226b243188 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 12 Mar 2020 14:40:07 -0400 Subject: [PATCH 113/126] csi: docstring and log message fixups (#7327) Fix some docstring typos and fix noisy log message during client restarts. A log for the common case where the plugin socket isn't ready yet isn't actionable by the operator so having it at info is just noise. 
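As a rough illustration of the log-level choice described above, the sketch below shows a readiness poll that keeps the expected "socket not ready yet" case at Debug, reserving higher levels for conditions an operator can act on. This is not the actual plugin_supervisor_hook code; the checkOnce callback and the 5-second interval are assumptions made for the example.

// Minimal sketch, assuming a checkOnce probe and a fixed poll interval.
package plugins

import (
	"context"
	"time"

	hclog "github.com/hashicorp/go-hclog"
)

// waitForPlugin polls until the plugin reports healthy or ctx is cancelled.
func waitForPlugin(ctx context.Context, logger hclog.Logger,
	checkOnce func(context.Context) (bool, error)) bool {

	t := time.NewTimer(5 * time.Second)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return false
		case <-t.C:
			healthy, err := checkOnce(ctx)
			if err != nil || !healthy {
				// Expected while the plugin is still starting, so this is
				// Debug rather than Info: there is nothing for an operator to do.
				logger.Debug("CSI Plugin not ready", "error", err)
				t.Reset(5 * time.Second)
				continue
			}
			return true
		}
	}
}

The same reasoning applies to any condition that is routine during startup and self-heals on the next poll.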
--- client/allocrunner/taskrunner/plugin_supervisor_hook.go | 2 +- client/devicemanager/manager.go | 2 +- client/devicemanager/state/state.go | 4 ++-- client/pluginmanager/drivermanager/state/state.go | 4 ++-- client/state/state_database.go | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 6f3a3b8b3..8db05723c 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -186,7 +186,7 @@ WAITFORREADY: case <-t.C: pluginHealthy, err := h.supervisorLoopOnce(ctx, socketPath) if err != nil || !pluginHealthy { - h.logger.Info("CSI Plugin not ready", "error", err) + h.logger.Debug("CSI Plugin not ready", "error", err) // Plugin is not yet returning healthy, because we want to optimise for // quickly bringing a plugin online, we use a short timeout here. diff --git a/client/devicemanager/manager.go b/client/devicemanager/manager.go index c0fade17d..afc22847b 100644 --- a/client/devicemanager/manager.go +++ b/client/devicemanager/manager.go @@ -128,7 +128,7 @@ func New(c *Config) *manager { // PluginType identifies this manager to the plugin manager and satisfies the PluginManager interface. func (*manager) PluginType() string { return base.PluginTypeDevice } -// Run starts thed device manager. The manager will shutdown any previously +// Run starts the device manager. The manager will shutdown any previously // launched plugin and then begin fingerprinting and stats collection on all new // device plugins. func (m *manager) Run() { diff --git a/client/devicemanager/state/state.go b/client/devicemanager/state/state.go index e74be11ac..20eb789f6 100644 --- a/client/devicemanager/state/state.go +++ b/client/devicemanager/state/state.go @@ -2,10 +2,10 @@ package state import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" -// PluginState is used to store the device managers state across restarts of the +// PluginState is used to store the device manager's state across restarts of the // agent type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugin's launched by + // ReattachConfigs are the set of reattach configs for plugins launched by // the device manager ReattachConfigs map[string]*pstructs.ReattachConfig } diff --git a/client/pluginmanager/drivermanager/state/state.go b/client/pluginmanager/drivermanager/state/state.go index 529499cff..f37717c3b 100644 --- a/client/pluginmanager/drivermanager/state/state.go +++ b/client/pluginmanager/drivermanager/state/state.go @@ -2,10 +2,10 @@ package state import pstructs "github.com/hashicorp/nomad/plugins/shared/structs" -// PluginState is used to store the driver managers state across restarts of the +// PluginState is used to store the driver manager's state across restarts of the // agent type PluginState struct { - // ReattachConfigs are the set of reattach configs for plugin's launched by + // ReattachConfigs are the set of reattach configs for plugins launched by // the driver manager ReattachConfigs map[string]*pstructs.ReattachConfig } diff --git a/client/state/state_database.go b/client/state/state_database.go index 6d1e65fb2..12d2c083d 100644 --- a/client/state/state_database.go +++ b/client/state/state_database.go @@ -34,7 +34,7 @@ devicemanager/ |--> plugin_state -> *dmstate.PluginState drivermanager/ -|--> plugin_state -> *dmstate.PluginState +|--> plugin_state -> *driverstate.PluginState 
*/ var ( From 6750c262a43cde9941470e6b94284d398d61d471 Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Thu, 12 Mar 2020 15:08:19 -0400 Subject: [PATCH 114/126] csi: use `ExternalID`, when set, to identify volumes for outside RPC calls (#7326) * nomad/structs/csi: new RemoteID() uses the ExternalID if set * nomad/csi_endpoint: pass RemoteID to volume request types * client/pluginmanager/csimanager/volume: pass RemoteID to NodePublishVolume --- client/pluginmanager/csimanager/volume.go | 2 +- nomad/csi_endpoint.go | 6 +++--- nomad/structs/csi.go | 7 +++++++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index 49cec1cb9..e76edae78 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -181,7 +181,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum } err = v.plugin.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{ - VolumeID: vol.ID, + VolumeID: vol.RemoteID(), PublishContext: publishContext, StagingTargetPath: pluginStagingPath, TargetPath: pluginTargetPath, diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 9d8e44a90..79632e436 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -247,7 +247,7 @@ func (srv *Server) controllerValidateVolume(req *structs.CSIVolumeRegisterReques method := "ClientCSIController.ValidateVolume" cReq := &cstructs.ClientCSIControllerValidateVolumeRequest{ - VolumeID: vol.ID, + VolumeID: vol.RemoteID(), AttachmentMode: vol.AttachmentMode, AccessMode: vol.AccessMode, } @@ -532,7 +532,7 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r method := "ClientCSIController.AttachVolume" cReq := &cstructs.ClientCSIControllerAttachVolumeRequest{ - VolumeID: req.VolumeID, + VolumeID: vol.RemoteID(), ClientCSINodeID: targetCSIInfo.NodeInfo.ID, AttachmentMode: vol.AttachmentMode, AccessMode: vol.AccessMode, @@ -587,7 +587,7 @@ func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, method := "ClientCSIController.DetachVolume" cReq := &cstructs.ClientCSIControllerDetachVolumeRequest{ - VolumeID: req.VolumeID, + VolumeID: vol.RemoteID(), ClientCSINodeID: targetCSIInfo.NodeInfo.ID, } cReq.PluginID = plug.ID diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index f1ad4e9fa..e76865ac9 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -212,6 +212,13 @@ func (v *CSIVolume) newStructs() { v.WriteAllocs = map[string]*Allocation{} } +func (v *CSIVolume) RemoteID() string { + if v.ExternalID != "" { + return v.ExternalID + } + return v.ID +} + func (v *CSIVolume) Stub() *CSIVolListStub { stub := CSIVolListStub{ ID: v.ID, From eda7be552c87be450f58b1575117c926a922c3bd Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 12 Mar 2020 16:24:58 -0400 Subject: [PATCH 115/126] csi: add dynamicplugins registry to client state store (#7330) In order to correctly fingerprint dynamic plugins on client restarts, we need to persist a handle to the plugin (that is, connection info) to the client state store. The dynamic registry will sync automatically to the client state whenever it receives a register/deregister call. 
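Before the diff, here is a condensed sketch of the persistence pattern this patch introduces: the registry restores its plugin map from a small storage interface at construction time and writes the full state back on every register/deregister. The types are trimmed down for illustration and are not the exact Nomad definitions.

// Hedged sketch of the restore-on-construct, sync-on-mutation flow; fields simplified.
package dynamicplugins

import "sync"

type PluginInfo struct {
	Type, Name, SocketPath string
}

type RegistryState struct {
	Plugins map[string]map[string]*PluginInfo
}

// StateStorage mirrors the two calls the registry needs from the client state DB.
type StateStorage interface {
	GetDynamicPluginRegistryState() (*RegistryState, error)
	PutDynamicPluginRegistryState(state *RegistryState) error
}

type registry struct {
	mu      sync.Mutex
	plugins map[string]map[string]*PluginInfo
	state   StateStorage
}

func newRegistry(state StateStorage) *registry {
	r := &registry{plugins: map[string]map[string]*PluginInfo{}, state: state}
	// Restore plugin handles persisted by a previous agent run, if any.
	if state != nil {
		if stored, err := state.GetDynamicPluginRegistryState(); err == nil && stored != nil {
			r.plugins = stored.Plugins
		}
	}
	return r
}

func (r *registry) RegisterPlugin(info *PluginInfo) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	byName, ok := r.plugins[info.Type]
	if !ok {
		byName = map[string]*PluginInfo{}
		r.plugins[info.Type] = byName
	}
	byName[info.Name] = info
	// Every mutation is written through so a restarted agent sees current connection info.
	return r.sync()
}

func (r *registry) sync() error {
	if r.state == nil {
		return nil
	}
	return r.state.PutDynamicPluginRegistryState(&RegistryState{Plugins: r.plugins})
}

Deregistration follows the same shape, delete from the map and then call sync, which is why the patch changes both RegisterPlugin and DeregisterPlugin to return d.sync().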
--- client/client.go | 21 ++++--- client/dynamicplugins/registry.go | 50 ++++++++++++++-- client/dynamicplugins/registry_test.go | 55 ++++++++++++++++- client/pluginmanager/csimanager/manager.go | 3 + .../pluginmanager/csimanager/manager_test.go | 1 + client/state/db_test.go | 26 ++++++++ client/state/interface.go | 7 +++ client/state/memdb.go | 17 ++++++ client/state/noopdb.go | 9 +++ client/state/state_database.go | 59 ++++++++++++++++++- 10 files changed, 231 insertions(+), 17 deletions(-) diff --git a/client/client.go b/client/client.go index 05ce5dd6a..96100fa44 100644 --- a/client/client.go +++ b/client/client.go @@ -341,14 +341,6 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic invalidAllocs: make(map[string]struct{}), serversContactedCh: make(chan struct{}), serversContactedOnce: sync.Once{}, - dynamicRegistry: dynamicplugins.NewRegistry(map[string]dynamicplugins.PluginDispenser{ - dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) { - return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "controller")) - }, - dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) { - return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "client")) - }, - }), } c.batchNodeUpdates = newBatchNodeUpdates( @@ -363,11 +355,22 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulServic // Start server manager rebalancing go routine go c.servers.Start() - // Initialize the client + // initialize the client if err := c.init(); err != nil { return nil, fmt.Errorf("failed to initialize client: %v", err) } + // initialize the dynamic registry (needs to happen after init) + c.dynamicRegistry = + dynamicplugins.NewRegistry(c.stateDB, map[string]dynamicplugins.PluginDispenser{ + dynamicplugins.PluginTypeCSIController: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "controller")) + }, + dynamicplugins.PluginTypeCSINode: func(info *dynamicplugins.PluginInfo) (interface{}, error) { + return csi.NewClient(info.ConnectionInfo.SocketPath, logger.Named("csi_client").With("plugin.name", info.Name, "plugin.type", "client")) + }, // TODO(tgross): refactor these dispenser constructors into csimanager to tidy it up + }) + // Setup the clients RPC server c.setupClientRpc() diff --git a/client/dynamicplugins/registry.go b/client/dynamicplugins/registry.go index e9025e73a..b0739f0b8 100644 --- a/client/dynamicplugins/registry.go +++ b/client/dynamicplugins/registry.go @@ -31,16 +31,48 @@ type Registry interface { StubDispenserForType(ptype string, dispenser PluginDispenser) } +// RegistryState is what we persist in the client state store. It contains +// a map of plugin types to maps of plugin name -> PluginInfo. +type RegistryState struct { + Plugins map[string]map[string]*PluginInfo +} + type PluginDispenser func(info *PluginInfo) (interface{}, error) // NewRegistry takes a map of `plugintype` to PluginDispenser functions // that should be used to vend clients for plugins to be used. 
-func NewRegistry(dispensers map[string]PluginDispenser) Registry { - return &dynamicRegistry{ +func NewRegistry(state StateStorage, dispensers map[string]PluginDispenser) Registry { + + registry := &dynamicRegistry{ plugins: make(map[string]map[string]*PluginInfo), broadcasters: make(map[string]*pluginEventBroadcaster), dispensers: dispensers, + state: state, } + + // populate the state and initial broadcasters if we have an + // existing state DB to restore + if state != nil { + storedState, err := state.GetDynamicPluginRegistryState() + if err == nil && storedState != nil { + registry.plugins = storedState.Plugins + for ptype := range registry.plugins { + registry.broadcasterForPluginType(ptype) + } + } + } + + return registry +} + +// StateStorage is used to persist the dynamic plugin registry's state +// across agent restarts. +type StateStorage interface { + // GetDynamicPluginRegistryState is used to restore the registry state + GetDynamicPluginRegistryState() (*RegistryState, error) + + // PutDynamicPluginRegistryState is used to store the registry state + PutDynamicPluginRegistryState(state *RegistryState) error } // PluginInfo is the metadata that is stored by the registry for a given plugin. @@ -98,6 +130,8 @@ type dynamicRegistry struct { dispensers map[string]PluginDispenser stubDispensers map[string]PluginDispenser + + state StateStorage } // StubDispenserForType allows test functions to provide alternative plugin @@ -159,7 +193,7 @@ func (d *dynamicRegistry) RegisterPlugin(info *PluginInfo) error { } broadcaster.broadcast(event) - return nil + return d.sync() } func (d *dynamicRegistry) broadcasterForPluginType(ptype string) *pluginEventBroadcaster { @@ -210,7 +244,7 @@ func (d *dynamicRegistry) DeregisterPlugin(ptype, name string) error { } broadcaster.broadcast(event) - return nil + return d.sync() } func (d *dynamicRegistry) ListPlugins(ptype string) []*PluginInfo { @@ -296,6 +330,14 @@ func (d *dynamicRegistry) PluginsUpdatedCh(ctx context.Context, ptype string) <- return ch } +func (d *dynamicRegistry) sync() error { + if d.state != nil { + storedState := &RegistryState{Plugins: d.plugins} + return d.state.PutDynamicPluginRegistryState(storedState) + } + return nil +} + func (d *dynamicRegistry) Shutdown() { for _, b := range d.broadcasters { b.shutdown() diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go index a3feaaac5..3e6513d18 100644 --- a/client/dynamicplugins/registry_test.go +++ b/client/dynamicplugins/registry_test.go @@ -2,6 +2,7 @@ package dynamicplugins import ( "context" + "sync" "testing" "time" @@ -65,7 +66,7 @@ func TestPluginEventBroadcaster_UnsubscribeWorks(t *testing.T) { func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { t.Parallel() - r := NewRegistry(nil) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() @@ -103,7 +104,7 @@ func TestDynamicRegistry_RegisterPlugin_SendsUpdateEvents(t *testing.T) { func TestDynamicRegistry_DeregisterPlugin_SendsUpdateEvents(t *testing.T) { t.Parallel() - r := NewRegistry(nil) + r := NewRegistry(nil, nil) ctx, cancelFn := context.WithCancel(context.Background()) defer cancelFn() @@ -148,7 +149,7 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { return struct{}{}, nil } - registry := NewRegistry(map[string]PluginDispenser{"csi": dispenseFn}) + registry := NewRegistry(nil, map[string]PluginDispenser{"csi": dispenseFn}) err := registry.RegisterPlugin(&PluginInfo{ Type: "csi", @@ -169,3 
+170,51 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { require.NotNil(t, result) require.NoError(t, err) } + +func TestDynamicRegistry_StateStore(t *testing.T) { + t.Parallel() + dispenseFn := func(i *PluginInfo) (interface{}, error) { + return i, nil + } + + memdb := &MemDB{} + oldR := NewRegistry(memdb, map[string]PluginDispenser{"csi": dispenseFn}) + + err := oldR.RegisterPlugin(&PluginInfo{ + Type: "csi", + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + result, err := oldR.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) + + // recreate the registry from the state store and query again + newR := NewRegistry(memdb, map[string]PluginDispenser{"csi": dispenseFn}) + result, err = newR.DispensePlugin("csi", "my-plugin") + require.NotNil(t, result) + require.NoError(t, err) +} + +// MemDB implements a StateDB that stores data in memory and should only be +// used for testing. All methods are safe for concurrent use. This is a +// partial implementation of the MemDB in the client/state package, copied +// here to avoid circular dependencies. +type MemDB struct { + dynamicManagerPs *RegistryState + mu sync.RWMutex +} + +func (m *MemDB) GetDynamicPluginRegistryState() (*RegistryState, error) { + m.mu.Lock() + defer m.mu.Unlock() + return m.dynamicManagerPs, nil +} + +func (m *MemDB) PutDynamicPluginRegistryState(ps *RegistryState) error { + m.mu.Lock() + defer m.mu.Unlock() + m.dynamicManagerPs = ps + return nil +} diff --git a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go index 9f932e384..d79b1b339 100644 --- a/client/pluginmanager/csimanager/manager.go +++ b/client/pluginmanager/csimanager/manager.go @@ -85,6 +85,9 @@ func (c *csiManager) MounterForVolume(ctx context.Context, vol *structs.CSIVolum // Run starts a plugin manager and should return early func (c *csiManager) Run() { + // Ensure we have at least one full sync before starting + c.resyncPluginsFromRegistry("csi-controller") + c.resyncPluginsFromRegistry("csi-node") go c.runLoop() } diff --git a/client/pluginmanager/csimanager/manager_test.go b/client/pluginmanager/csimanager/manager_test.go index 176dc6003..24b854ab5 100644 --- a/client/pluginmanager/csimanager/manager_test.go +++ b/client/pluginmanager/csimanager/manager_test.go @@ -21,6 +21,7 @@ var fakePlugin = &dynamicplugins.PluginInfo{ func setupRegistry() dynamicplugins.Registry { return dynamicplugins.NewRegistry( + nil, map[string]dynamicplugins.PluginDispenser{ "csi-controller": func(*dynamicplugins.PluginInfo) (interface{}, error) { return nil, nil diff --git a/client/state/db_test.go b/client/state/db_test.go index c37ed8bd2..bb63507a2 100644 --- a/client/state/db_test.go +++ b/client/state/db_test.go @@ -8,6 +8,7 @@ import ( trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" @@ -238,6 +239,31 @@ func TestStateDB_DriverManager(t *testing.T) { }) } +// TestStateDB_DynamicRegistry asserts the behavior of dynamic registry state related StateDB +// methods. 
+func TestStateDB_DynamicRegistry(t *testing.T) { + t.Parallel() + + testDB(t, func(t *testing.T, db StateDB) { + require := require.New(t) + + // Getting nonexistent state should return nils + ps, err := db.GetDynamicPluginRegistryState() + require.NoError(err) + require.Nil(ps) + + // Putting PluginState should work + state := &dynamicplugins.RegistryState{} + require.NoError(db.PutDynamicPluginRegistryState(state)) + + // Getting should return the available state + ps, err = db.GetDynamicPluginRegistryState() + require.NoError(err) + require.NotNil(ps) + require.Equal(state, ps) + }) +} + // TestStateDB_Upgrade asserts calling Upgrade on new databases always // succeeds. func TestStateDB_Upgrade(t *testing.T) { diff --git a/client/state/interface.go b/client/state/interface.go index 2624b46ea..dc492d5ec 100644 --- a/client/state/interface.go +++ b/client/state/interface.go @@ -3,6 +3,7 @@ package state import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -69,6 +70,12 @@ type StateDB interface { // state. PutDriverPluginState(state *driverstate.PluginState) error + // GetDynamicPluginRegistryState is used to retrieve a dynamic plugin manager's state. + GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) + + // PutDynamicPluginRegistryState is used to store the dynamic plugin managers's state. + PutDynamicPluginRegistryState(state *dynamicplugins.RegistryState) error + // Close the database. Unsafe for further use after calling regardless // of return value. Close() error diff --git a/client/state/memdb.go b/client/state/memdb.go index 5d64870e1..63e967e45 100644 --- a/client/state/memdb.go +++ b/client/state/memdb.go @@ -6,6 +6,7 @@ import ( hclog "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ -29,6 +30,9 @@ type MemDB struct { // drivermanager -> plugin-state driverManagerPs *driverstate.PluginState + // dynamicmanager -> registry-state + dynamicManagerPs *dynamicplugins.RegistryState + logger hclog.Logger mu sync.RWMutex @@ -193,6 +197,19 @@ func (m *MemDB) PutDriverPluginState(ps *driverstate.PluginState) error { return nil } +func (m *MemDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + m.mu.Lock() + defer m.mu.Unlock() + return m.dynamicManagerPs, nil +} + +func (m *MemDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + m.mu.Lock() + defer m.mu.Unlock() + m.dynamicManagerPs = ps + return nil +} + func (m *MemDB) Close() error { m.mu.Lock() defer m.mu.Unlock() diff --git a/client/state/noopdb.go b/client/state/noopdb.go index 53364ecba..28fbd2c15 100644 --- a/client/state/noopdb.go +++ b/client/state/noopdb.go @@ -3,6 +3,7 @@ package state import ( "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/nomad/structs" ) @@ 
-70,6 +71,14 @@ func (n NoopDB) GetDriverPluginState() (*driverstate.PluginState, error) { return nil, nil } +func (n NoopDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + return nil +} + +func (n NoopDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + return nil, nil +} + func (n NoopDB) Close() error { return nil } diff --git a/client/state/state_database.go b/client/state/state_database.go index 12d2c083d..a9a958f5f 100644 --- a/client/state/state_database.go +++ b/client/state/state_database.go @@ -11,6 +11,7 @@ import ( hclog "github.com/hashicorp/go-hclog" trstate "github.com/hashicorp/nomad/client/allocrunner/taskrunner/state" dmstate "github.com/hashicorp/nomad/client/devicemanager/state" + "github.com/hashicorp/nomad/client/dynamicplugins" driverstate "github.com/hashicorp/nomad/client/pluginmanager/drivermanager/state" "github.com/hashicorp/nomad/helper/boltdd" "github.com/hashicorp/nomad/nomad/structs" @@ -35,6 +36,9 @@ devicemanager/ drivermanager/ |--> plugin_state -> *driverstate.PluginState + +dynamicplugins/ +|--> registry_state -> *dynamicplugins.RegistryState */ var ( @@ -73,13 +77,20 @@ var ( // data devManagerBucket = []byte("devicemanager") - // driverManagerBucket is the bucket name container all driver manager + // driverManagerBucket is the bucket name containing all driver manager // related data driverManagerBucket = []byte("drivermanager") // managerPluginStateKey is the key by which plugin manager plugin state is // stored at managerPluginStateKey = []byte("plugin_state") + + // dynamicPluginBucket is the bucket name containing all dynamic plugin + // registry data. each dynamic plugin registry will have its own subbucket. + dynamicPluginBucket = []byte("dynamicplugins") + + // registryStateKey is the key at which dynamic plugin registry state is stored + registryStateKey = []byte("registry_state") ) // taskBucketName returns the bucket name for the given task name. @@ -598,6 +609,52 @@ func (s *BoltStateDB) GetDriverPluginState() (*driverstate.PluginState, error) { return ps, nil } +// PutDynamicPluginRegistryState stores the dynamic plugin registry's +// state or returns an error. +func (s *BoltStateDB) PutDynamicPluginRegistryState(ps *dynamicplugins.RegistryState) error { + return s.db.Update(func(tx *boltdd.Tx) error { + // Retrieve the root dynamic plugin manager bucket + dynamicBkt, err := tx.CreateBucketIfNotExists(dynamicPluginBucket) + if err != nil { + return err + } + return dynamicBkt.Put(registryStateKey, ps) + }) +} + +// GetDynamicPluginRegistryState stores the dynamic plugin registry's +// registry state or returns an error. +func (s *BoltStateDB) GetDynamicPluginRegistryState() (*dynamicplugins.RegistryState, error) { + var ps *dynamicplugins.RegistryState + + err := s.db.View(func(tx *boltdd.Tx) error { + dynamicBkt := tx.Bucket(dynamicPluginBucket) + if dynamicBkt == nil { + // No state, return + return nil + } + + // Restore Plugin State if it exists + ps = &dynamicplugins.RegistryState{} + if err := dynamicBkt.Get(registryStateKey, ps); err != nil { + if !boltdd.IsErrNotFound(err) { + return fmt.Errorf("failed to read dynamic plugin registry state: %v", err) + } + + // Key not found, reset ps to nil + ps = nil + } + + return nil + }) + + if err != nil { + return nil, err + } + + return ps, nil +} + // init initializes metadata entries in a newly created state database. 
func (s *BoltStateDB) init() error { return s.db.Update(func(tx *boltdd.Tx) error { From 0cd2d3cc299689ae4331bfec32592b439e7239eb Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 13 Mar 2020 09:39:24 -0400 Subject: [PATCH 116/126] csi: make claims on volumes idempotent for the same alloc (#7328) Nomad clients will push node updates during client restart which can cause an extra claim for a volume by the same alloc. If an alloc already claims a volume, we can allow it to be treated as a valid claim and continue. --- nomad/structs/csi.go | 8 ++++++++ nomad/structs/csi_test.go | 5 +++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index e76865ac9..704fece53 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -298,6 +298,10 @@ func (v *CSIVolume) Claim(claim CSIVolumeClaimMode, alloc *Allocation) bool { // ClaimRead marks an allocation as using a volume read-only func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { + if _, ok := v.ReadAllocs[alloc.ID]; ok { + return true + } + if !v.CanReadOnly() { return false } @@ -310,6 +314,10 @@ func (v *CSIVolume) ClaimRead(alloc *Allocation) bool { // ClaimWrite marks an allocation as using a volume as a writer func (v *CSIVolume) ClaimWrite(alloc *Allocation) bool { + if _, ok := v.WriteAllocs[alloc.ID]; ok { + return true + } + if !v.CanWrite() { return false } diff --git a/nomad/structs/csi_test.go b/nomad/structs/csi_test.go index 83f2ba2c9..2978baf49 100644 --- a/nomad/structs/csi_test.go +++ b/nomad/structs/csi_test.go @@ -11,7 +11,8 @@ func TestCSIVolumeClaim(t *testing.T) { vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter vol.Schedulable = true - alloc := &Allocation{ID: "al"} + alloc := &Allocation{ID: "a1"} + alloc2 := &Allocation{ID: "a2"} vol.ClaimRead(alloc) require.True(t, vol.CanReadOnly()) @@ -21,7 +22,7 @@ func TestCSIVolumeClaim(t *testing.T) { vol.ClaimWrite(alloc) require.True(t, vol.CanReadOnly()) require.False(t, vol.CanWrite()) - require.False(t, vol.ClaimWrite(alloc)) + require.False(t, vol.ClaimWrite(alloc2)) vol.ClaimRelease(alloc) require.True(t, vol.CanReadOnly()) From cd1c6173f47f0a7ad93d653ff17af65017f76b82 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 16 Mar 2020 08:53:04 -0400 Subject: [PATCH 117/126] csi: e2e tests for EBS and EFS plugins (#7343) This changeset provides two basic e2e tests for CSI plugins targeting common AWS use cases. The EBS test launches the EBS plugin (controller + nodes) and registers an EBS volume as a Nomad CSI volume. We deploy a job that writes to the volume, stop that job, and reuse the volume for another job which should be able to read the data written by the first job. The EFS test launches the EFS plugin (nodes-only) and registers an EFS volume as a Nomad CSI volume. We deploy a job that writes to the volume, stop that job, and reuse the volume for another job which should be able to read the data written by the first job. The writer jobs mount the CSI volume at a location within the alloc dir. 
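As context for the test code below, this is roughly what the registration step looks like from the Go API client: an externally provisioned volume is registered with Nomad by ID so that jobs can claim it through the plugin. This is a sketch, not part of the e2e suite itself; the ExternalID value is a placeholder, since the real volume ID comes from the Terraform output the tests read.

// Hedged sketch; the EBS volume ID is a placeholder and error handling is minimal.
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	vol := &api.CSIVolume{
		ID:             "ebs-vol0",
		Name:           "ebs-vol0",
		ExternalID:     "vol-0123456789abcdef0", // placeholder external AWS volume ID
		AccessMode:     "single-node-writer",
		AttachmentMode: "file-system",
		PluginID:       "aws-ebs0",
	}
	if _, err := client.CSIVolumes().Register(vol, nil); err != nil {
		log.Fatal(err)
	}
	// Jobs can now claim "ebs-vol0" through a group-level csi volume stanza
	// and mount it under the alloc dir with volume_mount, as the test jobs do.
}

The reader/writer jobs in the diff exercise exactly this flow, with the controller handling attach/detach for EBS and the node-only plugin handling EFS.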
--- e2e/.gitignore | 1 + e2e/csi/csi.go | 251 ++++++++++++++++++ e2e/csi/input/plugin-aws-ebs-controller.nomad | 40 +++ e2e/csi/input/plugin-aws-ebs-nodes.nomad | 43 +++ e2e/csi/input/plugin-aws-efs-nodes.nomad | 45 ++++ e2e/csi/input/use-ebs-volume.nomad | 32 +++ e2e/csi/input/use-efs-volume-read.nomad | 33 +++ e2e/csi/input/use-efs-volume-write.nomad | 32 +++ e2e/e2e_test.go | 1 + e2e/terraform/iam.tf | 1 + e2e/terraform/provisioning.tf | 9 + 11 files changed, 488 insertions(+) create mode 100644 e2e/csi/csi.go create mode 100644 e2e/csi/input/plugin-aws-ebs-controller.nomad create mode 100644 e2e/csi/input/plugin-aws-ebs-nodes.nomad create mode 100644 e2e/csi/input/plugin-aws-efs-nodes.nomad create mode 100644 e2e/csi/input/use-ebs-volume.nomad create mode 100644 e2e/csi/input/use-efs-volume-read.nomad create mode 100644 e2e/csi/input/use-efs-volume-write.nomad diff --git a/e2e/.gitignore b/e2e/.gitignore index cfc151d21..adad33a7b 100644 --- a/e2e/.gitignore +++ b/e2e/.gitignore @@ -1 +1,2 @@ provisioning.json +csi/input/volumes.json diff --git a/e2e/csi/csi.go b/e2e/csi/csi.go new file mode 100644 index 000000000..4029e103d --- /dev/null +++ b/e2e/csi/csi.go @@ -0,0 +1,251 @@ +package csi + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "os" + "time" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/e2e/e2eutil" + "github.com/hashicorp/nomad/e2e/framework" + "github.com/hashicorp/nomad/helper/uuid" + "github.com/stretchr/testify/require" +) + +type CSIVolumesTest struct { + framework.TC + jobIds []string + volumeIDs *volumeConfig +} + +func init() { + framework.AddSuites(&framework.TestSuite{ + Component: "CSI", + CanRunLocal: true, + Consul: false, + Cases: []framework.TestCase{ + new(CSIVolumesTest), + }, + }) +} + +type volumeConfig struct { + EBSVolumeID string `json:"ebs_volume"` + EFSVolumeID string `json:"efs_volume"` +} + +func (tc *CSIVolumesTest) BeforeAll(f *framework.F) { + t := f.T() + // The volume IDs come from the external provider, so we need + // to read the configuration out of our Terraform output. + rawjson, err := ioutil.ReadFile("csi/input/volumes.json") + if err != nil { + t.Skip("volume ID configuration not found, try running 'terraform output volumes > ../csi/input/volumes.json'") + } + volumeIDs := &volumeConfig{} + err = json.Unmarshal(rawjson, volumeIDs) + if err != nil { + t.Fatal("volume ID configuration could not be read") + } + + tc.volumeIDs = volumeIDs + + // Ensure cluster has leader and at least two client + // nodes in a ready state before running tests + e2eutil.WaitForLeader(t, tc.Nomad()) + e2eutil.WaitForNodesReady(t, tc.Nomad(), 2) +} + +// TestEBSVolumeClaim launches AWS EBS plugins and registers an EBS volume +// as a Nomad CSI volume. We then deploy a job that writes to the volume, +// stop that job, and reuse the volume for another job which should be able +// to read the data written by the first job. 
+func (tc *CSIVolumesTest) TestEBSVolumeClaim(f *framework.F) { + t := f.T() + require := require.New(t) + nomadClient := tc.Nomad() + uuid := uuid.Generate() + + // deploy the controller plugin job + controllerJobID := "aws-ebs-plugin-controller-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, controllerJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-ebs-controller.nomad", controllerJobID, "") + + // deploy the node plugins job + nodesJobID := "aws-ebs-plugin-nodes-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, nodesJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-ebs-nodes.nomad", nodesJobID, "") + + // wait for plugin to become healthy + require.Eventually(func() bool { + plugin, _, err := nomadClient.CSIPlugins().Info("aws-ebs0", nil) + if err != nil { + return false + } + if plugin.ControllersHealthy != 1 || plugin.NodesHealthy < 2 { + return false + } + return true + // TODO(tgross): cut down this time after fixing + // https://github.com/hashicorp/nomad/issues/7296 + }, 90*time.Second, 5*time.Second) + + // register a volume + volID := "ebs-vol0" + vol := &api.CSIVolume{ + ID: volID, + Name: volID, + ExternalID: tc.volumeIDs.EBSVolumeID, + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "aws-ebs0", + } + _, err := nomadClient.CSIVolumes().Register(vol, nil) + require.NoError(err) + defer nomadClient.CSIVolumes().Deregister(volID, nil) + + // deploy a job that writes to the volume + writeJobID := "write-ebs-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, writeJobID) + writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-ebs-volume.nomad", writeJobID, "") + writeAllocID := writeAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID) + + // read data from volume and assert the writer wrote a file to it + writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil) + require.NoError(err) + expectedPath := "/local/test/" + writeAllocID + _, err = readFile(nomadClient, writeAlloc, expectedPath) + require.NoError(err) + + // Shutdown the writer so we can run a reader. + // we could mount the EBS volume with multi-attach, but we + // want this test to exercise the unpublish workflow. + nomadClient.Jobs().Deregister(writeJobID, true, nil) + + // deploy a job so we can read from the volume + readJobID := "read-ebs-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, readJobID) + readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-ebs-volume.nomad", readJobID, "") + readAllocID := readAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, readAllocID) + + // ensure we clean up claim before we deregister volumes + defer nomadClient.Jobs().Deregister(readJobID, true, nil) + + // read data from volume and assert the writer wrote a file to it + readAlloc, _, err := nomadClient.Allocations().Info(readAllocID, nil) + require.NoError(err) + _, err = readFile(nomadClient, readAlloc, expectedPath) + require.NoError(err) +} + +// TestEFSVolumeClaim launches AWS EFS plugins and registers an EFS volume +// as a Nomad CSI volume. We then deploy a job that writes to the volume, +// and share the volume with another job which should be able to read the +// data written by the first job. 
+func (tc *CSIVolumesTest) TestEFSVolumeClaim(f *framework.F) { + t := f.T() + require := require.New(t) + nomadClient := tc.Nomad() + uuid := uuid.Generate() + + // deploy the node plugins job (no need for a controller for EFS) + nodesJobID := "aws-efs-plugin-nodes-" + uuid[0:8] + tc.jobIds = append(tc.jobIds, nodesJobID) + e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/plugin-aws-efs-nodes.nomad", nodesJobID, "") + + // wait for plugin to become healthy + require.Eventually(func() bool { + plugin, _, err := nomadClient.CSIPlugins().Info("aws-efs0", nil) + if err != nil { + return false + } + if plugin.NodesHealthy < 2 { + return false + } + return true + // TODO(tgross): cut down this time after fixing + // https://github.com/hashicorp/nomad/issues/7296 + }, 90*time.Second, 5*time.Second) + + // register a volume + volID := "efs-vol0" + vol := &api.CSIVolume{ + ID: volID, + Name: volID, + ExternalID: tc.volumeIDs.EFSVolumeID, + AccessMode: "single-node-writer", + AttachmentMode: "file-system", + PluginID: "aws-efs0", + } + _, err := nomadClient.CSIVolumes().Register(vol, nil) + require.NoError(err) + defer nomadClient.CSIVolumes().Deregister(volID, nil) + + // deploy a job that writes to the volume + writeJobID := "write-efs-" + uuid[0:8] + writeAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-efs-volume-write.nomad", writeJobID, "") + writeAllocID := writeAllocs[0].ID + e2eutil.WaitForAllocRunning(t, nomadClient, writeAllocID) + + // read data from volume and assert the writer wrote a file to it + writeAlloc, _, err := nomadClient.Allocations().Info(writeAllocID, nil) + require.NoError(err) + expectedPath := "/local/test/" + writeAllocID + _, err = readFile(nomadClient, writeAlloc, expectedPath) + require.NoError(err) + + // Shutdown the writer so we can run a reader. + // although EFS should support multiple readers, the plugin + // does not. + nomadClient.Jobs().Deregister(writeJobID, true, nil) + + // deploy a job that reads from the volume. 
+ readJobID := "read-efs-" + uuid[0:8] + readAllocs := e2eutil.RegisterAndWaitForAllocs(t, nomadClient, + "csi/input/use-efs-volume-read.nomad", readJobID, "") + defer nomadClient.Jobs().Deregister(readJobID, true, nil) + e2eutil.WaitForAllocRunning(t, nomadClient, readAllocs[0].ID) + + // read data from volume and assert the writer wrote a file to it + readAlloc, _, err := nomadClient.Allocations().Info(readAllocs[0].ID, nil) + require.NoError(err) + _, err = readFile(nomadClient, readAlloc, expectedPath) + require.NoError(err) +} + +func (tc *CSIVolumesTest) AfterEach(f *framework.F) { + nomadClient := tc.Nomad() + jobs := nomadClient.Jobs() + // Stop all jobs in test + for _, id := range tc.jobIds { + jobs.Deregister(id, true, nil) + } + // Garbage collect + nomadClient.System().GarbageCollect() +} + +// TODO(tgross): replace this w/ AllocFS().Stat() after +// https://github.com/hashicorp/nomad/issues/7365 is fixed +func readFile(client *api.Client, alloc *api.Allocation, path string) (bytes.Buffer, error) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + + var stdout, stderr bytes.Buffer + _, err := client.Allocations().Exec(ctx, + alloc, "task", false, + []string{"cat", path}, + os.Stdin, &stdout, &stderr, + make(chan api.TerminalSize), nil) + return stdout, err +} diff --git a/e2e/csi/input/plugin-aws-ebs-controller.nomad b/e2e/csi/input/plugin-aws-ebs-controller.nomad new file mode 100644 index 000000000..e5caa730c --- /dev/null +++ b/e2e/csi/input/plugin-aws-ebs-controller.nomad @@ -0,0 +1,40 @@ +# jobspec for running CSI plugin for AWS EBS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-ebs-controller" { + datacenters = ["dc1"] + + group "controller" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-ebs-csi-driver:latest" + + args = [ + "controller", + "--endpoint=unix://csi/csi.sock", + "--logtostderr", + "--v=5", + ] + + # note: plugins running as controllers don't + # need to run as privileged tasks + } + + csi_plugin { + id = "aws-ebs0" + type = "controller" + mount_dir = "/csi" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/plugin-aws-ebs-nodes.nomad b/e2e/csi/input/plugin-aws-ebs-nodes.nomad new file mode 100644 index 000000000..303b2a8e8 --- /dev/null +++ b/e2e/csi/input/plugin-aws-ebs-nodes.nomad @@ -0,0 +1,43 @@ +# jobspec for running CSI plugin for AWS EBS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-ebs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-ebs-nodes" { + datacenters = ["dc1"] + + # you can run node plugins as service jobs as well, but this ensures + # that all nodes in the DC have a copy. 
+ type = "system" + + group "nodes" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-ebs-csi-driver:latest" + + args = [ + "node", + "--endpoint=unix://csi/csi.sock", + "--logtostderr", + "--v=5", + ] + + privileged = true + } + + csi_plugin { + id = "aws-ebs0" + type = "node" + mount_dir = "/csi" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/plugin-aws-efs-nodes.nomad b/e2e/csi/input/plugin-aws-efs-nodes.nomad new file mode 100644 index 000000000..8e1429e7a --- /dev/null +++ b/e2e/csi/input/plugin-aws-efs-nodes.nomad @@ -0,0 +1,45 @@ +# jobspec for running CSI plugin for AWS EFS, derived from +# the kubernetes manifests found at +# https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/deploy/kubernetes + +job "plugin-aws-efs-nodes" { + datacenters = ["dc1"] + + # you can run node plugins as service jobs as well, but this ensures + # that all nodes in the DC have a copy. + type = "system" + + group "nodes" { + task "plugin" { + driver = "docker" + + config { + image = "amazon/aws-efs-csi-driver:latest" + + # note: the EFS driver doesn't seem to respect the --endpoint + # flag and always sets up the listener at '/tmp/csi.sock' + args = [ + "node", + "--endpoint=unix://tmp/csi.sock", + "--logtostderr", + "--v=5", + ] + + privileged = true + } + + csi_plugin { + id = "aws-efs0" + type = "node" + mount_dir = "/tmp" + } + + # note: there's no upstream guidance on resource usage so + # this is a best guess until we profile it in heavy use + resources { + cpu = 500 + memory = 256 + } + } + } +} diff --git a/e2e/csi/input/use-ebs-volume.nomad b/e2e/csi/input/use-ebs-volume.nomad new file mode 100644 index 000000000..866a6a4dc --- /dev/null +++ b/e2e/csi/input/use-ebs-volume.nomad @@ -0,0 +1,32 @@ +# a job that mounts an EBS volume and writes its job ID as a file +job "use-ebs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + type = "csi" + source = "ebs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = false + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/csi/input/use-efs-volume-read.nomad b/e2e/csi/input/use-efs-volume-read.nomad new file mode 100644 index 000000000..12b5f56b2 --- /dev/null +++ b/e2e/csi/input/use-efs-volume-read.nomad @@ -0,0 +1,33 @@ +# a job that mounts the EFS volume and sleeps, so that we can +# read its mounted file system remotely +job "use-efs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + type = "csi" + source = "efs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = true + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/csi/input/use-efs-volume-write.nomad b/e2e/csi/input/use-efs-volume-write.nomad new file mode 100644 index 000000000..912fa734f --- /dev/null +++ b/e2e/csi/input/use-efs-volume-write.nomad @@ -0,0 +1,32 @@ +# a job that mounts an EFS volume and writes its job ID as a file +job "use-efs-volume" { + datacenters = ["dc1"] + + group "group" { + volume "test" { + 
type = "csi" + source = "efs-vol0" + } + + task "task" { + driver = "docker" + + config { + image = "busybox:1" + command = "/bin/sh" + args = ["-c", "touch /local/test/${NOMAD_ALLOC_ID}; sleep 3600"] + } + + volume_mount { + volume = "test" + destination = "${NOMAD_TASK_DIR}/test" + read_only = false + } + + resources { + cpu = 500 + memory = 128 + } + } + } +} diff --git a/e2e/e2e_test.go b/e2e/e2e_test.go index 8e3d0bf75..8b63e1b5b 100644 --- a/e2e/e2e_test.go +++ b/e2e/e2e_test.go @@ -13,6 +13,7 @@ import ( _ "github.com/hashicorp/nomad/e2e/connect" _ "github.com/hashicorp/nomad/e2e/consul" _ "github.com/hashicorp/nomad/e2e/consultemplate" + _ "github.com/hashicorp/nomad/e2e/csi" _ "github.com/hashicorp/nomad/e2e/deployment" _ "github.com/hashicorp/nomad/e2e/example" _ "github.com/hashicorp/nomad/e2e/hostvolumes" diff --git a/e2e/terraform/iam.tf b/e2e/terraform/iam.tf index 484d0c1ce..8cf30ed3c 100644 --- a/e2e/terraform/iam.tf +++ b/e2e/terraform/iam.tf @@ -48,6 +48,7 @@ data "aws_iam_policy_document" "auto_discover_cluster" { "ec2:DescribeTags", "ec2:DescribeVolume*", "ec2:AttachVolume", + "ec2:DetachVolume", "autoscaling:DescribeAutoScalingGroups", ] resources = ["*"] diff --git a/e2e/terraform/provisioning.tf b/e2e/terraform/provisioning.tf index 5e68d22ca..5d69b13b4 100644 --- a/e2e/terraform/provisioning.tf +++ b/e2e/terraform/provisioning.tf @@ -9,6 +9,15 @@ export NOMAD_E2E=1 EOM } +output "volumes" { + description = "get volume IDs needed to register volumes for CSI testing." + value = jsonencode( + { + "ebs_volume" : aws_ebs_volume.csi.id, + "efs_volume" : aws_efs_file_system.csi.id, + }) +} + output "provisioning" { description = "output to a file to be use w/ E2E framework -provision.terraform" value = jsonencode( From 22e9f679c3fced9bafaa0cbaf539935ef3bc6114 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 16 Mar 2020 15:59:42 -0400 Subject: [PATCH 118/126] csi: implement controller detach RPCs (#7356) This changeset implements the remaining controller detach RPCs: server-to-client and client-to-controller. The tests also uncovered a bug in our RPC for claims which is fixed here; the volume claim RPC is used for both claiming and releasing a claim on a volume. We should only submit a controller publish RPC when the claim is new and not when it's being released. --- client/csi_controller_endpoint.go | 37 ++++++- client/csi_controller_endpoint_test.go | 83 ++++++++++++++++ nomad/client_csi_endpoint.go | 34 +++++++ nomad/client_csi_endpoint_test.go | 78 +++++++++++++++ nomad/core_sched_test.go | 128 ++++++++++++++++++++++++- nomad/csi_endpoint.go | 13 ++- nomad/csi_endpoint_test.go | 4 +- 7 files changed, 366 insertions(+), 11 deletions(-) diff --git a/client/csi_controller_endpoint.go b/client/csi_controller_endpoint.go index d724f72bd..d1c25c3f0 100644 --- a/client/csi_controller_endpoint.go +++ b/client/csi_controller_endpoint.go @@ -3,7 +3,6 @@ package client import ( "context" "errors" - "fmt" "time" metrics "github.com/armon/go-metrics" @@ -66,7 +65,7 @@ func (c *CSIController) ValidateVolume(req *structs.ClientCSIControllerValidateV // 1. Validate the volume request // 2. Call ControllerPublishVolume on the CSI Plugin to trigger a remote attachment // -// In the future this may be expanded to request dynamic secrets for attachement. +// In the future this may be expanded to request dynamic secrets for attachment. 
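+// Nomad servers reach this endpoint through the ClientCSIController.AttachVolume
+// RPC, which forwards the request to the client node that is running the
+// controller plugin.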
func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolumeRequest, resp *structs.ClientCSIControllerAttachVolumeResponse) error { defer metrics.MeasureSince([]string{"client", "csi_controller", "publish_volume"}, time.Now()) plugin, err := c.findControllerPlugin(req.PluginID) @@ -105,8 +104,40 @@ func (c *CSIController) AttachVolume(req *structs.ClientCSIControllerAttachVolum return nil } +// DetachVolume is used to detach a volume from a CSI Cluster from +// the storage node provided in the request. func (c *CSIController) DetachVolume(req *structs.ClientCSIControllerDetachVolumeRequest, resp *structs.ClientCSIControllerDetachVolumeResponse) error { - return fmt.Errorf("Unimplemented") + defer metrics.MeasureSince([]string{"client", "csi_controller", "unpublish_volume"}, time.Now()) + plugin, err := c.findControllerPlugin(req.PluginID) + if err != nil { + return err + } + defer plugin.Close() + + // The following block of validation checks should not be reached on a + // real Nomad cluster as all of this data should be validated when registering + // volumes with the cluster. They serve as a defensive check before forwarding + // requests to plugins, and to aid with development. + + if req.VolumeID == "" { + return errors.New("VolumeID is required") + } + + if req.ClientCSINodeID == "" { + return errors.New("ClientCSINodeID is required") + } + + csiReq := req.ToCSIRequest() + + // Submit the request for a volume to the CSI Plugin. + ctx, cancelFn := c.requestContext() + defer cancelFn() + _, err = plugin.ControllerUnpublishVolume(ctx, csiReq) + if err != nil { + return err + } + + return nil } func (c *CSIController) findControllerPlugin(name string) (csi.CSIPlugin, error) { diff --git a/client/csi_controller_endpoint_test.go b/client/csi_controller_endpoint_test.go index 90795ba0d..777a87e4b 100644 --- a/client/csi_controller_endpoint_test.go +++ b/client/csi_controller_endpoint_test.go @@ -263,3 +263,86 @@ func TestCSIController_ValidateVolume(t *testing.T) { }) } } + +func TestCSIController_DetachVolume(t *testing.T) { + t.Parallel() + + cases := []struct { + Name string + ClientSetupFunc func(*fake.Client) + Request *structs.ClientCSIControllerDetachVolumeRequest + ExpectedErr error + ExpectedResponse *structs.ClientCSIControllerDetachVolumeResponse + }{ + { + Name: "returns plugin not found errors", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: "some-garbage", + }, + }, + ExpectedErr: errors.New("plugin some-garbage for type csi-controller not found"), + }, + { + Name: "validates volumeid is not empty", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + }, + ExpectedErr: errors.New("VolumeID is required"), + }, + { + Name: "validates nodeid is not empty", + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + }, + ExpectedErr: errors.New("ClientCSINodeID is required"), + }, + { + Name: "returns transitive errors", + ClientSetupFunc: func(fc *fake.Client) { + fc.NextControllerUnpublishVolumeErr = errors.New("hello") + }, + Request: &structs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: structs.CSIControllerQuery{ + PluginID: fakePlugin.Name, + }, + VolumeID: "1234-4321-1234-4321", + ClientCSINodeID: "abcde", + }, + ExpectedErr: errors.New("hello"), + }, + 
} + + for _, tc := range cases { + t.Run(tc.Name, func(t *testing.T) { + require := require.New(t) + client, cleanup := TestClient(t, nil) + defer cleanup() + + fakeClient := &fake.Client{} + if tc.ClientSetupFunc != nil { + tc.ClientSetupFunc(fakeClient) + } + + dispenserFunc := func(*dynamicplugins.PluginInfo) (interface{}, error) { + return fakeClient, nil + } + client.dynamicRegistry.StubDispenserForType(dynamicplugins.PluginTypeCSIController, dispenserFunc) + + err := client.dynamicRegistry.RegisterPlugin(fakePlugin) + require.Nil(err) + + var resp structs.ClientCSIControllerDetachVolumeResponse + err = client.ClientRPC("CSIController.DetachVolume", tc.Request, &resp) + require.Equal(tc.ExpectedErr, err) + if tc.ExpectedResponse != nil { + require.Equal(tc.ExpectedResponse, &resp) + } + }) + } +} diff --git a/nomad/client_csi_endpoint.go b/nomad/client_csi_endpoint.go index 36c230ea7..18f60b361 100644 --- a/nomad/client_csi_endpoint.go +++ b/nomad/client_csi_endpoint.go @@ -82,3 +82,37 @@ func (a *ClientCSIController) ValidateVolume(args *cstructs.ClientCSIControllerV } return nil } + +func (a *ClientCSIController) DetachVolume(args *cstructs.ClientCSIControllerDetachVolumeRequest, reply *cstructs.ClientCSIControllerDetachVolumeResponse) error { + defer metrics.MeasureSince([]string{"nomad", "client_csi_controller", "detach_volume"}, time.Now()) + + // Verify the arguments. + if args.ControllerNodeID == "" { + return errors.New("missing ControllerNodeID") + } + + // Make sure Node is valid and new enough to support RPC + snap, err := a.srv.State().Snapshot() + if err != nil { + return err + } + + _, err = getNodeForRpc(snap, args.ControllerNodeID) + if err != nil { + return err + } + + // Get the connection to the client + state, ok := a.srv.getNodeConn(args.ControllerNodeID) + if !ok { + return findNodeConnAndForward(a.srv, args.ControllerNodeID, "ClientCSIController.DetachVolume", args, reply) + } + + // Make the RPC + err = NodeRpc(state.Session, "CSIController.DetachVolume", args, reply) + if err != nil { + return fmt.Errorf("detach volume: %v", err) + } + return nil + +} diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 4be3ce99d..56e52cd06 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -89,3 +89,81 @@ func TestClientCSIController_AttachVolume_Forwarded(t *testing.T) { // Should recieve an error from the client endpoint require.Contains(err.Error(), "must specify plugin name to dispense") } + +func TestClientCSIController_DetachVolume_Local(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s, cleanupS := TestServer(t, nil) + defer cleanupS() + codec := rpcClient(t, s) + testutil.WaitForLeader(t, s.RPC) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s.config.RPCAddr.String()} + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + req := &cstructs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.DetachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} + 
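For orientation, the detach path added in this commit chains three RPCs: the server's ClientCSIController.DetachVolume endpoint forwards the request to the client node that is running the controller plugin, and that node's CSIController.DetachVolume endpoint (above) issues ControllerUnpublishVolume against the plugin. Below is a minimal sketch of invoking the client endpoint directly, mirroring the tests in this file; the client handle, IDs, and plugin name are placeholders and not part of the patch:

	req := &structs.ClientCSIControllerDetachVolumeRequest{
		CSIControllerQuery: structs.CSIControllerQuery{
			PluginID:         "aws-ebs0",       // must resolve to a registered controller plugin
			ControllerNodeID: controllerNodeID, // used by the server RPC to pick the node to forward to
		},
		VolumeID:        externalVolumeID, // the storage provider's volume ID
		ClientCSINodeID: csiNodeID,        // CSI node ID of the node the volume is attached to
	}
	var resp structs.ClientCSIControllerDetachVolumeResponse
	err := client.ClientRPC("CSIController.DetachVolume", req, &resp)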
+func TestClientCSIController_DetachVolume_Forwarded(t *testing.T) { + t.Parallel() + require := require.New(t) + + // Start a server and client + s1, cleanupS1 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS1() + s2, cleanupS2 := TestServer(t, func(c *Config) { c.BootstrapExpect = 2 }) + defer cleanupS2() + TestJoin(t, s1, s2) + testutil.WaitForLeader(t, s1.RPC) + testutil.WaitForLeader(t, s2.RPC) + codec := rpcClient(t, s2) + + c, cleanupC := client.TestClient(t, func(c *config.Config) { + c.Servers = []string{s2.config.RPCAddr.String()} + c.GCDiskUsageThreshold = 100.0 + }) + defer cleanupC() + + testutil.WaitForResult(func() (bool, error) { + nodes := s2.connectedNodes() + return len(nodes) == 1, nil + }, func(err error) { + require.Fail("should have a client") + }) + + // Force remove the connection locally in case it exists + s1.nodeConnsLock.Lock() + delete(s1.nodeConns, c.NodeID()) + s1.nodeConnsLock.Unlock() + + req := &cstructs.ClientCSIControllerDetachVolumeRequest{ + CSIControllerQuery: cstructs.CSIControllerQuery{ControllerNodeID: c.NodeID()}, + } + + // Fetch the response + var resp structs.GenericResponse + err := msgpackrpc.CallWithCodec(codec, "ClientCSIController.DetachVolume", req, &resp) + require.NotNil(err) + // Should recieve an error from the client endpoint + require.Contains(err.Error(), "must specify plugin name to dispense") +} diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 99e534565..f00533a79 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2200,7 +2200,6 @@ func TestCSI_GCVolumeClaims(t *testing.T) { defer shutdown() testutil.WaitForLeader(t, srv.RPC) - // codec := rpcClient(t, srv) state := srv.fsm.State() ws := memdb.NewWatchSet() @@ -2303,3 +2302,130 @@ func TestCSI_GCVolumeClaims(t *testing.T) { require.Len(t, vol.ReadAllocs, 1) require.Len(t, vol.WriteAllocs, 0) } + +func TestCSI_GCVolumeClaims_Controller(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + + state := srv.fsm.State() + ws := memdb.NewWatchSet() + + // Create a client node, plugin, and volume + node := mock.Node() + node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version + node.CSINodePlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": { + PluginID: "csi-plugin-example", + Healthy: true, + RequiresControllerPlugin: true, + NodeInfo: &structs.CSINodeInfo{}, + }, + } + node.CSIControllerPlugins = map[string]*structs.CSIInfo{ + "csi-plugin-example": { + PluginID: "csi-plugin-example", + Healthy: true, + RequiresControllerPlugin: true, + ControllerInfo: &structs.CSIControllerInfo{ + SupportsReadOnlyAttach: true, + SupportsAttachDetach: true, + SupportsListVolumes: true, + SupportsListVolumesAttachedNodes: false, + }, + }, + } + err := state.UpsertNode(99, node) + require.NoError(t, err) + volId0 := uuid.Generate() + vols := []*structs.CSIVolume{{ + ID: volId0, + Namespace: "notTheNamespace", + PluginID: "csi-plugin-example", + AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + }} + err = state.CSIVolumeRegister(100, vols) + require.NoError(t, err) + vol, err := state.CSIVolumeByID(ws, volId0) + + require.NoError(t, err) + require.True(t, vol.ControllerRequired) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) + + // Create a job with 2 allocations + job := mock.Job() + 
job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{ + "_": { + Name: "someVolume", + Type: structs.VolumeTypeCSI, + Source: volId0, + ReadOnly: false, + }, + } + err = state.UpsertJob(101, job) + require.NoError(t, err) + + alloc1 := mock.Alloc() + alloc1.JobID = job.ID + alloc1.NodeID = node.ID + err = state.UpsertJobSummary(102, mock.JobSummary(alloc1.JobID)) + require.NoError(t, err) + alloc1.TaskGroup = job.TaskGroups[0].Name + + alloc2 := mock.Alloc() + alloc2.JobID = job.ID + alloc2.NodeID = node.ID + err = state.UpsertJobSummary(103, mock.JobSummary(alloc2.JobID)) + require.NoError(t, err) + alloc2.TaskGroup = job.TaskGroups[0].Name + + err = state.UpsertAllocs(104, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Claim the volumes and verify the claims were set + err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + require.NoError(t, err) + err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + require.NoError(t, err) + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 1) + require.Len(t, vol.WriteAllocs, 1) + + // Update both allocs as failed/terminated + alloc1.ClientStatus = structs.AllocClientStatusFailed + alloc2.ClientStatus = structs.AllocClientStatusFailed + err = state.UpdateAllocsFromClient(107, []*structs.Allocation{alloc1, alloc2}) + require.NoError(t, err) + + // Create the GC eval we'd get from Node.UpdateAlloc + now := time.Now().UTC() + eval := &structs.Evaluation{ + ID: uuid.Generate(), + Namespace: job.Namespace, + Priority: structs.CoreJobPriority, + Type: structs.JobTypeCore, + TriggeredBy: structs.EvalTriggerAllocStop, + JobID: structs.CoreJobCSIVolumeClaimGC + ":" + job.ID, + LeaderACL: srv.getLeaderAcl(), + Status: structs.EvalStatusPending, + CreateTime: now.UTC().UnixNano(), + ModifyTime: now.UTC().UnixNano(), + } + + // Process the eval + snap, err := state.Snapshot() + require.NoError(t, err) + core := NewCoreScheduler(srv, snap) + err = core.Process(eval) + require.NoError(t, err) + + // Verify both claims were released + vol, err = state.CSIVolumeByID(ws, volId0) + require.NoError(t, err) + require.Len(t, vol.ReadAllocs, 0) + require.Len(t, vol.WriteAllocs, 0) +} diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 79632e436..09dc46c21 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -342,7 +342,7 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return nil } -// Claim claims a volume +// Claim submits a change to a volume claim func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CSIVolumeClaimResponse) error { if done, err := v.srv.forward("CSIVolume.Claim", args, args, reply); done { return err @@ -361,10 +361,13 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return structs.ErrPermissionDenied } - // adds a Volume and PublishContext from the controller (if any) to the reply - err = v.srv.controllerPublishVolume(args, reply) - if err != nil { - return fmt.Errorf("controllerPublish: %v", err) + // if this is a new claim, add a Volume and PublishContext from the + // controller (if any) to the reply + if args.Claim != structs.CSIVolumeClaimRelease { + err = v.srv.controllerPublishVolume(args, reply) + if err != nil { + return fmt.Errorf("controller publish: %v", err) + } } resp, index, err := v.srv.raftApply(structs.CSIVolumeClaimRequestType, args) diff --git a/nomad/csi_endpoint_test.go 
b/nomad/csi_endpoint_test.go index 0bbd5684b..25d954152 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -220,7 +220,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { } claimResp := &structs.CSIVolumeClaimResponse{} err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) - require.EqualError(t, err, fmt.Sprintf("controllerPublish: volume not found: %s", id0), + require.EqualError(t, err, fmt.Sprintf("controller publish: volume not found: %s", id0), "expected 'volume not found' error because volume hasn't yet been created") // Create a client node, plugin, alloc, and volume @@ -377,7 +377,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { claimResp := &structs.CSIVolumeClaimResponse{} err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp) // Because the node is not registered - require.EqualError(t, err, "controllerPublish: attach volume: No path to node") + require.EqualError(t, err, "controller publish: attach volume: No path to node") } func TestCSIVolumeEndpoint_List(t *testing.T) { From fe926e899ef63d4cda6cdae073123e74c56b7552 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 17 Mar 2020 10:25:03 -0400 Subject: [PATCH 119/126] volumes: add task environment interpolation to volume_mount (#7364) --- client/allocrunner/taskrunner/volume_hook.go | 19 ++++- .../taskrunner/volume_hook_test.go | 73 ++++++++++++++++++- 2 files changed, 88 insertions(+), 4 deletions(-) diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 5e35f99d3..5447c5029 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -7,14 +7,16 @@ import ( log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/nomad/client/allocrunner/interfaces" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" ) type volumeHook struct { - alloc *structs.Allocation - runner *TaskRunner - logger log.Logger + alloc *structs.Allocation + runner *TaskRunner + logger log.Logger + taskEnv *taskenv.TaskEnv } func newVolumeHook(runner *TaskRunner, logger log.Logger) *volumeHook { @@ -169,6 +171,9 @@ func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volu } func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartRequest, resp *interfaces.TaskPrestartResponse) error { + h.taskEnv = req.TaskEnv + interpolateVolumeMounts(req.Task.VolumeMounts, h.taskEnv) + volumes := partitionVolumesByType(h.alloc.Job.LookupTaskGroup(h.alloc.TaskGroup).Volumes) hostVolumeMounts, err := h.prepareHostVolumes(req, volumes[structs.VolumeTypeHost]) @@ -196,3 +201,11 @@ func (h *volumeHook) Prestart(ctx context.Context, req *interfaces.TaskPrestartR return nil } + +func interpolateVolumeMounts(mounts []*structs.VolumeMount, taskEnv *taskenv.TaskEnv) { + for _, mount := range mounts { + mount.Volume = taskEnv.ReplaceEnv(mount.Volume) + mount.Destination = taskEnv.ReplaceEnv(mount.Destination) + mount.PropagationMode = taskEnv.ReplaceEnv(mount.PropagationMode) + } +} diff --git a/client/allocrunner/taskrunner/volume_hook_test.go b/client/allocrunner/taskrunner/volume_hook_test.go index 8c0e924fb..abe3a5848 100644 --- a/client/allocrunner/taskrunner/volume_hook_test.go +++ b/client/allocrunner/taskrunner/volume_hook_test.go @@ -6,7 +6,9 @@ import ( 
"github.com/hashicorp/nomad/client/allocrunner/interfaces" "github.com/hashicorp/nomad/client/pluginmanager/csimanager" cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" "github.com/stretchr/testify/require" @@ -86,7 +88,7 @@ func TestVolumeHook_prepareCSIVolumes(t *testing.T) { tr := &TaskRunner{ allocHookResources: &cstructs.AllocHookResources{ CSIMounts: map[string]*csimanager.MountInfo{ - "foo": &csimanager.MountInfo{ + "foo": { Source: "/mnt/my-test-volume", }, }, @@ -109,3 +111,72 @@ func TestVolumeHook_prepareCSIVolumes(t *testing.T) { require.NoError(t, err) require.Equal(t, expected, mounts) } + +func TestVolumeHook_Interpolation(t *testing.T) { + + alloc := mock.Alloc() + task := alloc.Job.TaskGroups[0].Tasks[0] + taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").SetHookEnv("volume", + map[string]string{ + "PROPAGATION_MODE": "private", + "VOLUME_ID": "my-other-volume", + }, + ).Build() + + mounts := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/bar-${NOMAD_JOB_NAME}", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "${VOLUME_ID}", + Destination: "/baz", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/quux", + ReadOnly: false, + PropagationMode: "${PROPAGATION_MODE}", + }, + } + + expected := []*structs.VolumeMount{ + { + Volume: "foo", + Destination: "/tmp", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/bar-my-job", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "my-other-volume", + Destination: "/baz", + ReadOnly: false, + PropagationMode: "bidirectional", + }, + { + Volume: "foo", + Destination: "/quux", + ReadOnly: false, + PropagationMode: "private", + }, + } + + interpolateVolumeMounts(mounts, taskEnv) + require.Equal(t, expected, mounts) +} From 3621df1dbf4dd336e93c1fe623c429c6d8162fda Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 17 Mar 2020 11:35:34 -0400 Subject: [PATCH 120/126] csi: volume ids are only unique per namespace (#7358) * nomad/state/schema: use the namespace compound index * scheduler/scheduler: CSIVolumeByID interface signature namespace * scheduler/stack: SetJob on CSIVolumeChecker to capture namespace * scheduler/feasible: pass the captured namespace to CSIVolumeByID * nomad/state/state_store: use namespace in csi_volume index * nomad/fsm: pass namespace to CSIVolumeDeregister & Claim * nomad/core_sched: pass the namespace in volumeClaimReap * nomad/node_endpoint_test: namespaces in Claim testing * nomad/csi_endpoint: pass RequestNamespace to state.* * nomad/csi_endpoint_test: appropriately failed test * command/alloc_status_test: appropriately failed test * node_endpoint_test: avoid notTheNamespace for the job * scheduler/feasible_test: call SetJob to capture the namespace * nomad/csi_endpoint: ACL check the req namespace, query by namespace * nomad/state/state_store: remove deregister namespace check * nomad/state/state_store: remove unused CSIVolumes * scheduler/feasible: CSIVolumeChecker SetJob -> SetNamespace * nomad/csi_endpoint: ACL check * nomad/state/state_store_test: remove call to state.CSIVolumes * nomad/core_sched_test: job 
namespace match so claim gc works --- command/alloc_status_test.go | 2 +- nomad/core_sched.go | 2 +- nomad/core_sched_test.go | 13 ++++---- nomad/csi_endpoint.go | 40 +++++++++--------------- nomad/csi_endpoint_test.go | 45 +++++++++++++++------------ nomad/fsm.go | 4 +-- nomad/node_endpoint_test.go | 11 ++++--- nomad/state/schema.go | 11 +++++-- nomad/state/state_store.go | 54 ++++++++++++++++----------------- nomad/state/state_store_test.go | 28 +++++++++-------- scheduler/feasible.go | 11 +++++-- scheduler/feasible_test.go | 2 ++ scheduler/scheduler.go | 2 +- scheduler/stack.go | 1 + 14 files changed, 120 insertions(+), 106 deletions(-) diff --git a/command/alloc_status_test.go b/command/alloc_status_test.go index 7ae840472..6f5b35c51 100644 --- a/command/alloc_status_test.go +++ b/command/alloc_status_test.go @@ -408,7 +408,7 @@ func TestAllocStatusCommand_CSIVolumes(t *testing.T) { vols := []*structs.CSIVolume{{ ID: vol0, - Namespace: "notTheNamespace", + Namespace: structs.DefaultNamespace, PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, diff --git a/nomad/core_sched.go b/nomad/core_sched.go index 601e66fe4..85781c890 100644 --- a/nomad/core_sched.go +++ b/nomad/core_sched.go @@ -753,7 +753,7 @@ func (c *CoreScheduler) volumeClaimReap(jobs []*structs.Job, leaderACL string) e continue // filter to just CSI volumes } volID := tgVolume.Source - vol, err := c.srv.State().CSIVolumeByID(ws, volID) + vol, err := c.srv.State().CSIVolumeByID(ws, job.Namespace, volID) if err != nil { result = multierror.Append(result, err) continue diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index f00533a79..27f534877 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2215,16 +2215,17 @@ func TestCSI_GCVolumeClaims(t *testing.T) { err := state.UpsertNode(99, node) require.NoError(t, err) volId0 := uuid.Generate() + ns := structs.DefaultNamespace vols := []*structs.CSIVolume{{ ID: volId0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "csi-plugin-example", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }} err = state.CSIVolumeRegister(100, vols) require.NoError(t, err) - vol, err := state.CSIVolumeByID(ws, volId0) + vol, err := state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 0) require.Len(t, vol.WriteAllocs, 0) @@ -2260,11 +2261,11 @@ func TestCSI_GCVolumeClaims(t *testing.T) { require.NoError(t, err) // Claim the volumes and verify the claims were set - err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) require.NoError(t, err) - err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) require.NoError(t, err) - vol, err = state.CSIVolumeByID(ws, volId0) + vol, err = state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 1) require.Len(t, vol.WriteAllocs, 1) @@ -2297,7 +2298,7 @@ func TestCSI_GCVolumeClaims(t *testing.T) { require.NoError(t, err) // Verify the claim was released - vol, err = state.CSIVolumeByID(ws, volId0) + vol, err = state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 1) require.Len(t, vol.WriteAllocs, 0) diff --git a/nomad/csi_endpoint.go 
b/nomad/csi_endpoint.go index 09dc46c21..3a6981e1a 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -101,6 +101,10 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV return err } + if !allowCSIAccess(aclObj, args.RequestNamespace()) { + return structs.ErrPermissionDenied + } + metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "list"}, metricsStart) @@ -114,11 +118,11 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV var iter memdb.ResultIterator if args.NodeID != "" { - iter, err = state.CSIVolumesByNodeID(ws, args.NodeID) + iter, err = state.CSIVolumesByNodeID(ws, ns, args.NodeID) } else if args.PluginID != "" { - iter, err = state.CSIVolumesByPluginID(ws, args.PluginID) + iter, err = state.CSIVolumesByPluginID(ws, ns, args.PluginID) } else { - iter, err = state.CSIVolumes(ws) + iter, err = state.CSIVolumesByNamespace(ws, ns) } if err != nil { @@ -127,7 +131,6 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV // Collect results, filter by ACL access var vs []*structs.CSIVolListStub - cache := map[string]bool{} for { raw := iter.Next() @@ -141,26 +144,12 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV return err } - // Filter on the request namespace to avoid ACL checks by volume - if ns != "" && vol.Namespace != args.RequestNamespace() { - continue - } - // Filter (possibly again) on PluginID to handle passing both NodeID and PluginID if args.PluginID != "" && args.PluginID != vol.PluginID { continue } - // Cache ACL checks QUESTION: are they expensive? - allowed, ok := cache[vol.Namespace] - if !ok { - allowed = allowCSIAccess(aclObj, vol.Namespace) - cache[vol.Namespace] = allowed - } - - if allowed { - vs = append(vs, vol.Stub()) - } + vs = append(vs, vol.Stub()) } reply.Volumes = vs return v.srv.replySetIndex(csiVolumeTable, &reply.QueryMeta) @@ -180,7 +169,8 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol return err } - if !allowCSIAccess(aclObj, args.RequestNamespace()) { + ns := args.RequestNamespace() + if !allowCSIAccess(aclObj, ns) { return structs.ErrPermissionDenied } @@ -191,7 +181,7 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol queryOpts: &args.QueryOptions, queryMeta: &reply.QueryMeta, run: func(ws memdb.WatchSet, state *state.StateStore) error { - vol, err := state.CSIVolumeByID(ws, args.ID) + vol, err := state.CSIVolumeByID(ws, ns, args.ID) if err != nil { return err } @@ -487,7 +477,7 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu // controllerPublishVolume sends publish request to the CSI controller // plugin associated with a volume, if any. func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, resp *structs.CSIVolumeClaimResponse) error { - plug, vol, err := srv.volAndPluginLookup(req.VolumeID) + plug, vol, err := srv.volAndPluginLookup(req.RequestNamespace(), req.VolumeID) if err != nil { return err } @@ -560,7 +550,7 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r // controller plugin associated with a volume, if any. // TODO: the only caller of this won't have an alloc pointer handy, should it be its own request arg type? 
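+// The volume is looked up by the request's namespace as well as its ID,
+// because volume IDs are only unique within a namespace.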
func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, targetNomadNodeID string) error { - plug, vol, err := srv.volAndPluginLookup(req.VolumeID) + plug, vol, err := srv.volAndPluginLookup(req.RequestNamespace(), req.VolumeID) if plug == nil || vol == nil || err != nil { return err // possibly nil if no controller required } @@ -598,11 +588,11 @@ func (srv *Server) controllerUnpublishVolume(req *structs.CSIVolumeClaimRequest, return srv.RPC(method, cReq, &cstructs.ClientCSIControllerDetachVolumeResponse{}) } -func (srv *Server) volAndPluginLookup(volID string) (*structs.CSIPlugin, *structs.CSIVolume, error) { +func (srv *Server) volAndPluginLookup(namespace, volID string) (*structs.CSIPlugin, *structs.CSIVolume, error) { state := srv.fsm.State() ws := memdb.NewWatchSet() - vol, err := state.CSIVolumeByID(ws, volID) + vol, err := state.CSIVolumeByID(ws, namespace, volID) if err != nil { return nil, nil, err } diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 25d954152..46987ed52 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -201,7 +201,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { defer shutdown() testutil.WaitForLeader(t, srv.RPC) - ns := structs.DefaultNamespace + ns := "not-default-ns" state := srv.fsm.State() codec := rpcClient(t, srv) id0 := uuid.Generate() @@ -237,7 +237,7 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { vols := []*structs.CSIVolume{{ ID: id0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, @@ -262,7 +262,8 @@ func TestCSIVolumeEndpoint_Claim(t *testing.T) { volGetReq := &structs.CSIVolumeGetRequest{ ID: id0, QueryOptions: structs.QueryOptions{ - Region: "global", + Region: "global", + Namespace: ns, }, } volGetResp := &structs.CSIVolumeGetResponse{} @@ -349,7 +350,7 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { require.NoError(t, err) vols := []*structs.CSIVolume{{ ID: id0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "minnie", ControllerRequired: true, AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, @@ -394,11 +395,11 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) srv.config.ACLEnabled = true - - policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) - nsTok := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) codec := rpcClient(t, srv) + nsPolicy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + nsTok := mock.CreatePolicyAndToken(t, state, 1000, "csi-access", nsPolicy) + id0 := uuid.Generate() id1 := uuid.Generate() id2 := uuid.Generate() @@ -423,21 +424,23 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, PluginID: "paddy", }} - err := state.CSIVolumeRegister(999, vols) + err := state.CSIVolumeRegister(1002, vols) require.NoError(t, err) var resp structs.CSIVolumeListResponse - // Query all, ACL only allows ns + // Query everything in the namespace req := &structs.CSIVolumeListRequest{ QueryOptions: structs.QueryOptions{ Region: "global", AuthToken: nsTok.SecretID, + Namespace: ns, }, } err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) require.NoError(t, err) - require.Equal(t, uint64(999), resp.Index) + + require.Equal(t, uint64(1002), resp.Index) 
require.Equal(t, 2, len(resp.Volumes)) ids := map[string]bool{vols[0].ID: true, vols[1].ID: true} for _, v := range resp.Volumes { @@ -445,7 +448,7 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { } require.Equal(t, 0, len(ids)) - // Query by PluginID + // Query by PluginID in ns req = &structs.CSIVolumeListRequest{ PluginID: "adam", QueryOptions: structs.QueryOptions{ @@ -459,18 +462,21 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { require.Equal(t, 1, len(resp.Volumes)) require.Equal(t, vols[1].ID, resp.Volumes[0].ID) - // Query by PluginID, ACL filters all results + // Query by PluginID in ms + msPolicy := mock.NamespacePolicy(ms, "", []string{acl.NamespaceCapabilityCSIAccess}) + msTok := mock.CreatePolicyAndToken(t, state, 1003, "csi-access", msPolicy) + req = &structs.CSIVolumeListRequest{ PluginID: "paddy", QueryOptions: structs.QueryOptions{ Region: "global", Namespace: ms, - AuthToken: nsTok.SecretID, + AuthToken: msTok.SecretID, }, } err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp) require.NoError(t, err) - require.Equal(t, 0, len(resp.Volumes)) + require.Equal(t, 1, len(resp.Volumes)) } func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { @@ -541,6 +547,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { id0 := uuid.Generate() id1 := uuid.Generate() id2 := uuid.Generate() + ns := "notTheNamespace" // Create a client node with a plugin node := mock.Node() @@ -557,7 +564,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { vols := []*structs.CSIVolume{ { ID: id0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "minnie", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, @@ -565,7 +572,7 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { }, { ID: id1, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "adam", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, @@ -576,19 +583,19 @@ func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { require.NoError(t, err) // has controller - plugin, vol, err := srv.volAndPluginLookup(id0) + plugin, vol, err := srv.volAndPluginLookup(ns, id0) require.NotNil(t, plugin) require.NotNil(t, vol) require.NoError(t, err) // no controller - plugin, vol, err = srv.volAndPluginLookup(id1) + plugin, vol, err = srv.volAndPluginLookup(ns, id1) require.Nil(t, plugin) require.NotNil(t, vol) require.NoError(t, err) // doesn't exist - plugin, vol, err = srv.volAndPluginLookup(id2) + plugin, vol, err = srv.volAndPluginLookup(ns, id2) require.Nil(t, plugin) require.Nil(t, vol) require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id2)) diff --git a/nomad/fsm.go b/nomad/fsm.go index 4433748c5..2dbaee2f5 100644 --- a/nomad/fsm.go +++ b/nomad/fsm.go @@ -1142,7 +1142,7 @@ func (n *nomadFSM) applyCSIVolumeDeregister(buf []byte, index uint64) interface{ } defer metrics.MeasureSince([]string{"nomad", "fsm", "apply_csi_volume_deregister"}, time.Now()) - if err := n.state.CSIVolumeDeregister(index, req.VolumeIDs); err != nil { + if err := n.state.CSIVolumeDeregister(index, req.RequestNamespace(), req.VolumeIDs); err != nil { n.logger.Error("CSIVolumeDeregister failed", "error", err) return err } @@ -1172,7 +1172,7 @@ func (n *nomadFSM) applyCSIVolumeClaim(buf []byte, index uint64) interface{} { return structs.ErrUnknownAllocationPrefix } - if err := n.state.CSIVolumeClaim(index, req.VolumeID, alloc, req.Claim); err != nil { + if err := 
n.state.CSIVolumeClaim(index, req.RequestNamespace(), req.VolumeID, alloc, req.Claim); err != nil { n.logger.Error("CSIVolumeClaim failed", "error", err) return err } diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index 478ea97ca..cd20482f4 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -2335,16 +2335,17 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { err := state.UpsertNode(99, node) require.NoError(t, err) volId0 := uuid.Generate() + ns := "notTheNamespace" vols := []*structs.CSIVolume{{ ID: volId0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "csi-plugin-example", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }} err = state.CSIVolumeRegister(100, vols) require.NoError(t, err) - vol, err := state.CSIVolumeByID(ws, volId0) + vol, err := state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 0) require.Len(t, vol.WriteAllocs, 0) @@ -2380,11 +2381,11 @@ func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) { require.NoError(t, err) // Claim the volumes and verify the claims were set - err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) require.NoError(t, err) - err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) require.NoError(t, err) - vol, err = state.CSIVolumeByID(ws, volId0) + vol, err = state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 1) require.Len(t, vol.WriteAllocs, 1) diff --git a/nomad/state/schema.go b/nomad/state/schema.go index 8a7db3e6e..b58bf6855 100644 --- a/nomad/state/schema.go +++ b/nomad/state/schema.go @@ -689,8 +689,15 @@ func csiVolumeTableSchema() *memdb.TableSchema { Name: "id", AllowMissing: false, Unique: true, - Indexer: &memdb.StringFieldIndex{ - Field: "ID", + Indexer: &memdb.CompoundIndex{ + Indexes: []memdb.Indexer{ + &memdb.StringFieldIndex{ + Field: "Namespace", + }, + &memdb.StringFieldIndex{ + Field: "ID", + }, + }, }, }, "plugin_id": { diff --git a/nomad/state/state_store.go b/nomad/state/state_store.go index d7eb45f29..ad2493d39 100644 --- a/nomad/state/state_store.go +++ b/nomad/state/state_store.go @@ -1627,7 +1627,7 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum for _, v := range volumes { // Check for volume existence - obj, err := txn.First("csi_volumes", "id", v.ID) + obj, err := txn.First("csi_volumes", "id", v.Namespace, v.ID) if err != nil { return fmt.Errorf("volume existence check error: %v", err) } @@ -1656,10 +1656,10 @@ func (s *StateStore) CSIVolumeRegister(index uint64, volumes []*structs.CSIVolum // CSIVolumeByID is used to lookup a single volume. Returns a copy of the volume // because its plugins are denormalized to provide accurate Health. 
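+// Volumes are stored under a compound (Namespace, ID) index, so lookups
+// must always be scoped to a namespace.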
-func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVolume, error) { +func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, namespace, id string) (*structs.CSIVolume, error) { txn := s.db.Txn(false) - watchCh, obj, err := txn.FirstWatch("csi_volumes", "id_prefix", id) + watchCh, obj, err := txn.FirstWatch("csi_volumes", "id_prefix", namespace, id) if err != nil { return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) } @@ -1674,28 +1674,13 @@ func (s *StateStore) CSIVolumeByID(ws memdb.WatchSet, id string) (*structs.CSIVo } // CSIVolumes looks up csi_volumes by pluginID -func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, pluginID string) (memdb.ResultIterator, error) { +func (s *StateStore) CSIVolumesByPluginID(ws memdb.WatchSet, namespace, pluginID string) (memdb.ResultIterator, error) { txn := s.db.Txn(false) iter, err := txn.Get("csi_volumes", "plugin_id", pluginID) if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } - ws.Add(iter.WatchCh()) - - return iter, nil -} - -// CSIVolumesByIDPrefix supports search -func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID string) (memdb.ResultIterator, error) { - txn := s.db.Txn(false) - - iter, err := txn.Get("csi_volumes", "id_prefix", volumeID) - if err != nil { - return nil, err - } - - ws.Add(iter.WatchCh()) // Filter the iterator by namespace f := func(raw interface{}) bool { @@ -1710,8 +1695,21 @@ func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID return wrap, nil } +// CSIVolumesByIDPrefix supports search +func (s *StateStore) CSIVolumesByIDPrefix(ws memdb.WatchSet, namespace, volumeID string) (memdb.ResultIterator, error) { + txn := s.db.Txn(false) + + iter, err := txn.Get("csi_volumes", "id_prefix", namespace, volumeID) + if err != nil { + return nil, err + } + + ws.Add(iter.WatchCh()) + return iter, nil +} + // CSIVolumesByNodeID looks up CSIVolumes in use on a node -func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, nodeID string) (memdb.ResultIterator, error) { +func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, namespace, nodeID string) (memdb.ResultIterator, error) { allocs, err := s.AllocsByNode(ws, nodeID) if err != nil { return nil, fmt.Errorf("alloc lookup failed: %v", err) @@ -1749,7 +1747,7 @@ func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, nodeID string) (memdb iter := NewSliceIterator() txn := s.db.Txn(false) for id := range ids { - raw, err := txn.First("csi_volumes", "id", id) + raw, err := txn.First("csi_volumes", "id", namespace, id) if err != nil { return nil, fmt.Errorf("volume lookup failed: %s %v", id, err) } @@ -1759,11 +1757,11 @@ func (s *StateStore) CSIVolumesByNodeID(ws memdb.WatchSet, nodeID string) (memdb return iter, nil } -// CSIVolumes looks up the entire csi_volumes table -func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) { +// CSIVolumesByNamespace looks up the entire csi_volumes table +func (s *StateStore) CSIVolumesByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) { txn := s.db.Txn(false) - iter, err := txn.Get("csi_volumes", "id") + iter, err := txn.Get("csi_volumes", "id_prefix", namespace, "") if err != nil { return nil, fmt.Errorf("volume lookup failed: %v", err) } @@ -1773,11 +1771,11 @@ func (s *StateStore) CSIVolumes(ws memdb.WatchSet) (memdb.ResultIterator, error) } // CSIVolumeClaim updates the volume's claim count and allocation list -func (s *StateStore) CSIVolumeClaim(index uint64, id 
string, alloc *structs.Allocation, claim structs.CSIVolumeClaimMode) error { +func (s *StateStore) CSIVolumeClaim(index uint64, namespace, id string, alloc *structs.Allocation, claim structs.CSIVolumeClaimMode) error { txn := s.db.Txn(true) defer txn.Abort() - row, err := txn.First("csi_volumes", "id", id) + row, err := txn.First("csi_volumes", "id", namespace, id) if err != nil { return fmt.Errorf("volume lookup failed: %s: %v", id, err) } @@ -1814,12 +1812,12 @@ func (s *StateStore) CSIVolumeClaim(index uint64, id string, alloc *structs.Allo } // CSIVolumeDeregister removes the volume from the server -func (s *StateStore) CSIVolumeDeregister(index uint64, ids []string) error { +func (s *StateStore) CSIVolumeDeregister(index uint64, namespace string, ids []string) error { txn := s.db.Txn(true) defer txn.Abort() for _, id := range ids { - existing, err := txn.First("csi_volumes", "id_prefix", id) + existing, err := txn.First("csi_volumes", "id_prefix", namespace, id) if err != nil { return fmt.Errorf("volume lookup failed: %s: %v", id, err) } diff --git a/nomad/state/state_store_test.go b/nomad/state/state_store_test.go index d498a2498..97522e2a7 100644 --- a/nomad/state/state_store_test.go +++ b/nomad/state/state_store_test.go @@ -2874,9 +2874,11 @@ func TestStateStore_CSIVolume(t *testing.T) { err = state.UpsertAllocs(index, []*structs.Allocation{alloc}) require.NoError(t, err) + ns := structs.DefaultNamespace + v0 := structs.NewCSIVolume("foo", index) v0.ID = vol0 - v0.Namespace = "default" + v0.Namespace = ns v0.PluginID = "minnie" v0.Schedulable = true v0.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter @@ -2885,7 +2887,7 @@ func TestStateStore_CSIVolume(t *testing.T) { index++ v1 := structs.NewCSIVolume("foo", index) v1.ID = vol1 - v1.Namespace = "default" + v1.Namespace = ns v1.PluginID = "adam" v1.Schedulable = true v1.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter @@ -2896,7 +2898,7 @@ func TestStateStore_CSIVolume(t *testing.T) { require.NoError(t, err) ws := memdb.NewWatchSet() - iter, err := state.CSIVolumes(ws) + iter, err := state.CSIVolumesByNamespace(ws, ns) require.NoError(t, err) slurp := func(iter memdb.ResultIterator) (vs []*structs.CSIVolume) { @@ -2915,31 +2917,31 @@ func TestStateStore_CSIVolume(t *testing.T) { require.Equal(t, 2, len(vs)) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByPluginID(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 1, len(vs)) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByNodeID(ws, node.ID) + iter, err = state.CSIVolumesByNodeID(ws, ns, node.ID) require.NoError(t, err) vs = slurp(iter) require.Equal(t, 1, len(vs)) index++ - err = state.CSIVolumeDeregister(index, []string{ + err = state.CSIVolumeDeregister(index, ns, []string{ vol1, }) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByPluginID(ws, "adam") + iter, err = state.CSIVolumesByPluginID(ws, ns, "adam") require.NoError(t, err) vs = slurp(iter) require.Equal(t, 0, len(vs)) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumes(ws) + iter, err = state.CSIVolumesByNamespace(ws, ns) require.NoError(t, err) vs = slurp(iter) require.Equal(t, 1, len(vs)) @@ -2952,22 +2954,22 @@ func TestStateStore_CSIVolume(t *testing.T) { u := structs.CSIVolumeClaimRelease index++ - err = state.CSIVolumeClaim(index, vol0, a0, r) + err = state.CSIVolumeClaim(index, ns, vol0, a0, r) require.NoError(t, err) index++ - err = 
state.CSIVolumeClaim(index, vol0, a1, w) + err = state.CSIVolumeClaim(index, ns, vol0, a1, w) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByPluginID(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") require.NoError(t, err) vs = slurp(iter) require.False(t, vs[0].CanWrite()) - err = state.CSIVolumeClaim(2, vol0, a0, u) + err = state.CSIVolumeClaim(2, ns, vol0, a0, u) require.NoError(t, err) ws = memdb.NewWatchSet() - iter, err = state.CSIVolumesByPluginID(ws, "minnie") + iter, err = state.CSIVolumesByPluginID(ws, ns, "minnie") require.NoError(t, err) vs = slurp(iter) require.True(t, vs[0].CanReadOnly()) diff --git a/scheduler/feasible.go b/scheduler/feasible.go index 44a8445f9..2b67a41f6 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -188,8 +188,9 @@ func (h *HostVolumeChecker) hasVolumes(n *structs.Node) bool { } type CSIVolumeChecker struct { - ctx Context - volumes map[string]*structs.VolumeRequest + ctx Context + namespace string + volumes map[string]*structs.VolumeRequest } func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker { @@ -198,6 +199,10 @@ func NewCSIVolumeChecker(ctx Context) *CSIVolumeChecker { } } +func (c *CSIVolumeChecker) SetNamespace(namespace string) { + c.namespace = namespace +} + func (c *CSIVolumeChecker) SetVolumes(volumes map[string]*structs.VolumeRequest) { xs := make(map[string]*structs.VolumeRequest) // Filter to only CSI Volumes @@ -237,7 +242,7 @@ func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) (bool, string) { for _, req := range c.volumes { // Get the volume to check that it's healthy (there's a healthy controller // and the volume hasn't encountered an error or been marked for GC - vol, err := c.ctx.State().CSIVolumeByID(ws, req.Source) + vol, err := c.ctx.State().CSIVolumeByID(ws, c.namespace, req.Source) if err != nil { return false, FilterConstraintCSIVolumesLookupFailed } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index e5b49745f..c7eac930e 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -306,6 +306,8 @@ func TestCSIVolumeChecker(t *testing.T) { } checker := NewCSIVolumeChecker(ctx) + checker.SetNamespace(structs.DefaultNamespace) + cases := []struct { Node *structs.Node RequestedVolumes map[string]*structs.VolumeRequest diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go index f7b81ce7d..1b7b68635 100644 --- a/scheduler/scheduler.go +++ b/scheduler/scheduler.go @@ -93,7 +93,7 @@ type State interface { SchedulerConfig() (uint64, *structs.SchedulerConfiguration, error) // CSIVolumeByID fetch CSI volumes, containing controller jobs - CSIVolumeByID(memdb.WatchSet, string) (*structs.CSIVolume, error) + CSIVolumeByID(memdb.WatchSet, string, string) (*structs.CSIVolume, error) } // Planner interface is used to submit a task allocation plan. 
diff --git a/scheduler/stack.go b/scheduler/stack.go index a02a876f3..c673870b0 100644 --- a/scheduler/stack.go +++ b/scheduler/stack.go @@ -96,6 +96,7 @@ func (s *GenericStack) SetJob(job *structs.Job) { s.nodeAffinity.SetJob(job) s.spread.SetJob(job) s.ctx.Eligibility().SetJob(job) + s.taskGroupCSIVolumes.SetNamespace(job.Namespace) if contextual, ok := s.quota.(ContextualIterator); ok { contextual.SetJob(job) From b596e67f4752d5e6d364a0bac10b9291c65af7af Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Tue, 17 Mar 2020 17:32:39 -0400 Subject: [PATCH 121/126] csi: implement volume ACLs (#7339) * acl/policy: add the volume ACL policies * nomad/csi_endpoint: enforce ACLs for volume access * nomad/search_endpoint_oss: volume acls * acl/acl: add plugin read as a global policy * acl/policy: add PluginPolicy global cap type * nomad/csi_endpoint: check the global plugin ACL policy * nomad/mock/acl: PluginPolicy * nomad/csi_endpoint: fix list rebase * nomad/core_sched_test: new test since #7358 * nomad/csi_endpoint_test: use correct permissions for list * nomad/csi_endpoint: allowCSIMount keeps ACL checks together * nomad/job_endpoint: check mount permission for jobs * nomad/job_endpoint_test: need plugin read, too --- acl/acl.go | 20 +++++++++++++ acl/policy.go | 54 ++++++++++++++++++++++++------------ acl/policy_test.go | 14 ++++++++++ nomad/core_sched_test.go | 13 +++++---- nomad/csi_endpoint.go | 30 ++++++++++++++------ nomad/csi_endpoint_test.go | 19 +++++++------ nomad/job_endpoint.go | 35 +++++++++++++---------- nomad/job_endpoint_test.go | 15 ++++++++-- nomad/mock/acl.go | 5 ++++ nomad/search_endpoint_oss.go | 20 ++++++++++++- 10 files changed, 167 insertions(+), 58 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index 2a6be0e5a..569158968 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -62,6 +62,7 @@ type ACL struct { node string operator string quota string + plugin string } // maxPrivilege returns the policy which grants the most privilege @@ -193,6 +194,9 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) { if policy.Quota != nil { acl.quota = maxPrivilege(acl.quota, policy.Quota.Policy) } + if policy.Plugin != nil { + acl.plugin = maxPrivilege(acl.plugin, policy.Plugin.Policy) + } } // Finalize the namespaces @@ -477,6 +481,22 @@ func (a *ACL) AllowQuotaWrite() bool { } } +// AllowPluginRead checks if read operations are allowed for all plugins +func (a *ACL) AllowPluginRead() bool { + // ACL is nil only if ACLs are disabled + if a == nil { + return true + } + switch { + case a.management: + return true + case a.plugin == PolicyRead: + return true + default: + return false + } +} + // IsManagement checks if this represents a management token func (a *ACL) IsManagement() bool { return a.management diff --git a/acl/policy.go b/acl/policy.go index b077d053e..4287fa6d6 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -35,7 +35,10 @@ const ( NamespaceCapabilitySentinelOverride = "sentinel-override" NamespaceCapabilityPrivilegedTask = "privileged-task" NamespaceCapabilityCSIAccess = "csi-access" - NamespaceCapabilityCSICreateVolume = "csi-create-volume" + NamespaceCapabilityCSIWriteVolume = "csi-write-volume" + NamespaceCapabilityCSIReadVolume = "csi-read-volume" + NamespaceCapabilityCSIListVolume = "csi-list-volume" + NamespaceCapabilityCSIMountVolume = "csi-mount-volume" ) var ( @@ -65,6 +68,7 @@ type Policy struct { Node *NodePolicy `hcl:"node"` Operator *OperatorPolicy `hcl:"operator"` Quota *QuotaPolicy `hcl:"quota"` + Plugin *PluginPolicy `hcl:"plugin"` Raw string 
`hcl:"-"` } @@ -76,7 +80,8 @@ func (p *Policy) IsEmpty() bool { p.Agent == nil && p.Node == nil && p.Operator == nil && - p.Quota == nil + p.Quota == nil && + p.Plugin == nil } // NamespacePolicy is the policy for a specific namespace @@ -109,6 +114,10 @@ type QuotaPolicy struct { Policy string } +type PluginPolicy struct { + Policy string +} + // isPolicyValid makes sure the given string matches one of the valid policies. func isPolicyValid(policy string) bool { switch policy { @@ -126,7 +135,8 @@ func isNamespaceCapabilityValid(cap string) bool { NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle, NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec, - NamespaceCapabilityCSIAccess, NamespaceCapabilityCSICreateVolume: + NamespaceCapabilityCSIAccess, // TODO(langmartin): remove after plugin caps are done + NamespaceCapabilityCSIReadVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume: return true // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride: @@ -139,25 +149,31 @@ func isNamespaceCapabilityValid(cap string) bool { // expandNamespacePolicy provides the equivalent set of capabilities for // a namespace policy func expandNamespacePolicy(policy string) []string { + read := []string{ + NamespaceCapabilityListJobs, + NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, + } + + write := append(read, []string{ + NamespaceCapabilitySubmitJob, + NamespaceCapabilityDispatchJob, + NamespaceCapabilityReadLogs, + NamespaceCapabilityReadFS, + NamespaceCapabilityAllocExec, + NamespaceCapabilityAllocLifecycle, + NamespaceCapabilityCSIMountVolume, + NamespaceCapabilityCSIWriteVolume, + }...) 
+ switch policy { case PolicyDeny: return []string{NamespaceCapabilityDeny} case PolicyRead: - return []string{ - NamespaceCapabilityListJobs, - NamespaceCapabilityReadJob, - } + return read case PolicyWrite: - return []string{ - NamespaceCapabilityListJobs, - NamespaceCapabilityReadJob, - NamespaceCapabilitySubmitJob, - NamespaceCapabilityDispatchJob, - NamespaceCapabilityReadLogs, - NamespaceCapabilityReadFS, - NamespaceCapabilityAllocExec, - NamespaceCapabilityAllocLifecycle, - } + return write default: return nil } @@ -265,5 +281,9 @@ func Parse(rules string) (*Policy, error) { if p.Quota != nil && !isPolicyValid(p.Quota.Policy) { return nil, fmt.Errorf("Invalid quota policy: %#v", p.Quota) } + + if p.Plugin != nil && !isPolicyValid(p.Plugin.Policy) { + return nil, fmt.Errorf("Invalid plugin policy: %#v", p.Plugin) + } return p, nil } diff --git a/acl/policy_test.go b/acl/policy_test.go index 831e80076..48b5a6a35 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -30,6 +30,8 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, }, }, }, @@ -58,6 +60,9 @@ func TestParse(t *testing.T) { quota { policy = "read" } + plugin { + policy = "read" + } `, "", &Policy{ @@ -68,6 +73,8 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, }, }, { @@ -76,12 +83,16 @@ func TestParse(t *testing.T) { Capabilities: []string{ NamespaceCapabilityListJobs, NamespaceCapabilityReadJob, + NamespaceCapabilityCSIListVolume, + NamespaceCapabilityCSIReadVolume, NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocExec, NamespaceCapabilityAllocLifecycle, + NamespaceCapabilityCSIMountVolume, + NamespaceCapabilityCSIWriteVolume, }, }, { @@ -104,6 +115,9 @@ func TestParse(t *testing.T) { Quota: &QuotaPolicy{ Policy: PolicyRead, }, + Plugin: &PluginPolicy{ + Policy: PolicyRead, + }, }, }, { diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 27f534877..773804e24 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -2340,16 +2340,17 @@ func TestCSI_GCVolumeClaims_Controller(t *testing.T) { err := state.UpsertNode(99, node) require.NoError(t, err) volId0 := uuid.Generate() + ns := structs.DefaultNamespace vols := []*structs.CSIVolume{{ ID: volId0, - Namespace: "notTheNamespace", + Namespace: ns, PluginID: "csi-plugin-example", AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter, AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, }} err = state.CSIVolumeRegister(100, vols) require.NoError(t, err) - vol, err := state.CSIVolumeByID(ws, volId0) + vol, err := state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.True(t, vol.ControllerRequired) @@ -2387,11 +2388,11 @@ func TestCSI_GCVolumeClaims_Controller(t *testing.T) { require.NoError(t, err) // Claim the volumes and verify the claims were set - err = state.CSIVolumeClaim(105, volId0, alloc1, structs.CSIVolumeClaimWrite) + err = state.CSIVolumeClaim(105, ns, volId0, alloc1, structs.CSIVolumeClaimWrite) require.NoError(t, err) - err = state.CSIVolumeClaim(106, volId0, alloc2, structs.CSIVolumeClaimRead) + err = state.CSIVolumeClaim(106, ns, volId0, alloc2, structs.CSIVolumeClaimRead) require.NoError(t, err) - vol, err = state.CSIVolumeByID(ws, 
volId0) + vol, err = state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 1) require.Len(t, vol.WriteAllocs, 1) @@ -2425,7 +2426,7 @@ func TestCSI_GCVolumeClaims_Controller(t *testing.T) { require.NoError(t, err) // Verify both claims were released - vol, err = state.CSIVolumeByID(ws, volId0) + vol, err = state.CSIVolumeByID(ws, ns, volId0) require.NoError(t, err) require.Len(t, vol.ReadAllocs, 0) require.Len(t, vol.WriteAllocs, 0) diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 3a6981e1a..758570bec 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -95,13 +95,16 @@ func (v *CSIVolume) List(args *structs.CSIVolumeListRequest, reply *structs.CSIV return err } - allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume, + acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityCSIMountVolume, + acl.NamespaceCapabilityListJobs) aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } - if !allowCSIAccess(aclObj, args.RequestNamespace()) { + if !allowVolume(aclObj, args.RequestNamespace()) { return structs.ErrPermissionDenied } @@ -163,7 +166,9 @@ func (v *CSIVolume) Get(args *structs.CSIVolumeGetRequest, reply *structs.CSIVol return err } - allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityCSIMountVolume, + acl.NamespaceCapabilityReadJob) aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, true) if err != nil { return err @@ -254,7 +259,7 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru return err } - allowCSIVolumeManagement := acl.NamespaceValidator(acl.NamespaceCapabilityCSICreateVolume) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIWriteVolume) aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false) if err != nil { return err @@ -263,7 +268,7 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "register"}, metricsStart) - if !allowCSIVolumeManagement(aclObj, args.RequestNamespace()) { + if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() { return structs.ErrPermissionDenied } @@ -275,6 +280,7 @@ func (v *CSIVolume) Register(args *structs.CSIVolumeRegisterRequest, reply *stru if err = vol.Validate(); err != nil { return err } + plugin, err := v.srv.pluginValidateVolume(args, vol) if err != nil { return err @@ -304,7 +310,7 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * return err } - allowCSIVolumeManagement := acl.NamespaceValidator(acl.NamespaceCapabilityCSICreateVolume) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIWriteVolume) aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, false) if err != nil { return err @@ -314,7 +320,7 @@ func (v *CSIVolume) Deregister(args *structs.CSIVolumeDeregisterRequest, reply * defer metrics.MeasureSince([]string{"nomad", "volume", "deregister"}, metricsStart) ns := args.RequestNamespace() - if !allowCSIVolumeManagement(aclObj, ns) { + if !allowVolume(aclObj, ns) { return structs.ErrPermissionDenied } @@ -338,7 +344,7 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return err } - allowCSIAccess := 
acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIMountVolume) aclObj, err := v.srv.WriteACLObj(&args.WriteRequest, true) if err != nil { return err @@ -347,7 +353,7 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "volume", "claim"}, metricsStart) - if !allowCSIAccess(aclObj, args.RequestNamespace()) { + if !allowVolume(aclObj, args.RequestNamespace()) || !aclObj.AllowPluginRead() { return structs.ErrPermissionDenied } @@ -374,6 +380,12 @@ func (v *CSIVolume) Claim(args *structs.CSIVolumeClaimRequest, reply *structs.CS return nil } +// allowCSIMount is called on Job register to check mount permission +func allowCSIMount(aclObj *acl.ACL, namespace string) bool { + return aclObj.AllowPluginRead() && + aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityCSIMountVolume) +} + // CSIPlugin wraps the structs.CSIPlugin with request data and server context type CSIPlugin struct { srv *Server diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 46987ed52..8548dc772 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -70,7 +70,7 @@ func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) { state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) srv.config.ACLEnabled = true - policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIReadVolume}) validToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) codec := rpcClient(t, srv) @@ -319,10 +319,11 @@ func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) { ns := structs.DefaultNamespace state := srv.fsm.State() state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) - policy := mock.NamespacePolicy(ns, "", - []string{acl.NamespaceCapabilityCSICreateVolume, acl.NamespaceCapabilityCSIAccess}) - accessToken := mock.CreatePolicyAndToken(t, state, 1001, - acl.NamespaceCapabilityCSIAccess, policy) + + policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIMountVolume}) + + mock.PluginPolicy("read") + accessToken := mock.CreatePolicyAndToken(t, state, 1001, "claim", policy) + codec := rpcClient(t, srv) id0 := uuid.Generate() @@ -397,7 +398,8 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { srv.config.ACLEnabled = true codec := rpcClient(t, srv) - nsPolicy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) + nsPolicy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIReadVolume}) + + mock.PluginPolicy("read") nsTok := mock.CreatePolicyAndToken(t, state, 1000, "csi-access", nsPolicy) id0 := uuid.Generate() @@ -463,8 +465,9 @@ func TestCSIVolumeEndpoint_List(t *testing.T) { require.Equal(t, vols[1].ID, resp.Volumes[0].ID) // Query by PluginID in ms - msPolicy := mock.NamespacePolicy(ms, "", []string{acl.NamespaceCapabilityCSIAccess}) - msTok := mock.CreatePolicyAndToken(t, state, 1003, "csi-access", msPolicy) + msPolicy := mock.NamespacePolicy(ms, "", []string{acl.NamespaceCapabilityCSIListVolume}) + + mock.PluginPolicy("read") + msTok := mock.CreatePolicyAndToken(t, state, 1003, "csi-access2", msPolicy) req = &structs.CSIVolumeListRequest{ PluginID: "paddy", diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 76c235a9f..10e0417a9 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -107,23 +107,28 @@ func 
(j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis // Validate Volume Permissions for _, tg := range args.Job.TaskGroups { for _, vol := range tg.Volumes { - if vol.Type != structs.VolumeTypeHost { + switch vol.Type { + case structs.VolumeTypeCSI: + if !allowCSIMount(aclObj, args.RequestNamespace()) { + return structs.ErrPermissionDenied + } + case structs.VolumeTypeHost: + // If a volume is readonly, then we allow access if the user has ReadOnly + // or ReadWrite access to the volume. Otherwise we only allow access if + // they have ReadWrite access. + if vol.ReadOnly { + if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadOnly) && + !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { + return structs.ErrPermissionDenied + } + } else { + if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { + return structs.ErrPermissionDenied + } + } + default: return structs.ErrPermissionDenied } - - // If a volume is readonly, then we allow access if the user has ReadOnly - // or ReadWrite access to the volume. Otherwise we only allow access if - // they have ReadWrite access. - if vol.ReadOnly { - if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadOnly) && - !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { - return structs.ErrPermissionDenied - } - } else { - if !aclObj.AllowHostVolumeOperation(vol.Source, acl.HostVolumeCapabilityMountReadWrite) { - return structs.ErrPermissionDenied - } - } } for _, t := range tg.Tasks { diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 47c78c3a1..4373a61f0 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -384,6 +384,10 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { Source: "prod-ca-certs", ReadOnly: readonlyVolume, }, + "csi": { + Type: structs.VolumeTypeCSI, + Source: "prod-db", + }, } tg.Tasks[0].VolumeMounts = []*structs.VolumeMount{ @@ -404,11 +408,18 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { volumesPolicyReadWrite := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadWrite}) - submitJobWithVolumesReadWriteToken := mock.CreatePolicyAndToken(t, s1.State(), 1002, "test-submit-volumes", submitJobPolicy+"\n"+volumesPolicyReadWrite) + volumesPolicyCSIMount := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIMountVolume}) + + mock.PluginPolicy("read") + + submitJobWithVolumesReadWriteToken := mock.CreatePolicyAndToken(t, s1.State(), 1002, "test-submit-volumes", submitJobPolicy+ + volumesPolicyReadWrite+ + volumesPolicyCSIMount) volumesPolicyReadOnly := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadOnly}) - submitJobWithVolumesReadOnlyToken := mock.CreatePolicyAndToken(t, s1.State(), 1003, "test-submit-volumes-readonly", submitJobPolicy+"\n"+volumesPolicyReadOnly) + submitJobWithVolumesReadOnlyToken := mock.CreatePolicyAndToken(t, s1.State(), 1003, "test-submit-volumes-readonly", submitJobPolicy+ + volumesPolicyReadOnly+ + volumesPolicyCSIMount) cases := []struct { Name string diff --git a/nomad/mock/acl.go b/nomad/mock/acl.go index 599bed4b5..d41a76016 100644 --- a/nomad/mock/acl.go +++ b/nomad/mock/acl.go @@ -73,6 +73,11 @@ func QuotaPolicy(policy string) string { return fmt.Sprintf("quota {\n\tpolicy = %q\n}\n", policy) } +// PluginPolicy is a helper for generating the hcl for a given plugin policy. 
+func PluginPolicy(policy string) string { + return fmt.Sprintf("plugin {\n\tpolicy = %q\n}\n", policy) +} + // CreatePolicy creates a policy with the given name and rule. func CreatePolicy(t testing.T, state StateStore, index uint64, name, rule string) { t.Helper() diff --git a/nomad/search_endpoint_oss.go b/nomad/search_endpoint_oss.go index aaecf1b34..b4d80c634 100644 --- a/nomad/search_endpoint_oss.go +++ b/nomad/search_endpoint_oss.go @@ -44,7 +44,13 @@ func anySearchPerms(aclObj *acl.ACL, namespace string, context structs.Context) nodeRead := aclObj.AllowNodeRead() jobRead := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob) - if !nodeRead && !jobRead { + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume, + acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityListJobs, + acl.NamespaceCapabilityReadJob) + volRead := allowVolume(aclObj, namespace) + + if !nodeRead && !jobRead && !volRead { return false } @@ -60,6 +66,9 @@ func anySearchPerms(aclObj *acl.ACL, namespace string, context structs.Context) return false } } + if !volRead && context == structs.Volumes { + return false + } return true } @@ -83,6 +92,11 @@ func searchContexts(aclObj *acl.ACL, namespace string, context structs.Context) } jobRead := aclObj.AllowNsOp(namespace, acl.NamespaceCapabilityReadJob) + allowVolume := acl.NamespaceValidator(acl.NamespaceCapabilityCSIListVolume, + acl.NamespaceCapabilityCSIReadVolume, + acl.NamespaceCapabilityListJobs, + acl.NamespaceCapabilityReadJob) + volRead := allowVolume(aclObj, namespace) // Filter contexts down to those the ACL grants access to available := make([]structs.Context, 0, len(all)) @@ -96,6 +110,10 @@ func searchContexts(aclObj *acl.ACL, namespace string, context structs.Context) if aclObj.AllowNodeRead() { available = append(available, c) } + case structs.Volumes: + if volRead { + available = append(available, c) + } } } return available From 6b6ae6c2bdf2407f61992cefd5a0045744d1b76d Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Wed, 18 Mar 2020 15:29:03 -0400 Subject: [PATCH 122/126] csi: ACLs for plugin endpoints (#7380) * acl/policy: add PolicyList for global ACLs * acl/acl: plugin policy * acl/acl: maxPrivilege is required to allow "list" * nomad/csi_endpoint: enforce plugin access with PolicyPlugin * nomad/csi_endpoint: check job ACL swapped params * nomad/csi_endpoint_test: test alloc filtering * acl/policy: add namespace csi-register-plugin * nomad/job_endpoint: check csi-register-plugin ACL on registration * nomad/job_endpoint_test: add plugin job cases --- acl/acl.go | 26 +++++++++-- acl/policy.go | 48 +++++++++++--------- acl/policy_test.go | 22 ++++++++++ nomad/csi_endpoint.go | 37 ++++++++++------ nomad/csi_endpoint_test.go | 89 ++++++++++++++++++++++++++++++++++++-- nomad/job_endpoint.go | 6 +++ nomad/job_endpoint_test.go | 25 +++++++++++ nomad/testing.go | 3 +- 8 files changed, 212 insertions(+), 44 deletions(-) diff --git a/acl/acl.go b/acl/acl.go index 569158968..57b64814f 100644 --- a/acl/acl.go +++ b/acl/acl.go @@ -75,6 +75,8 @@ func maxPrivilege(a, b string) string { return PolicyWrite case a == PolicyRead || b == PolicyRead: return PolicyRead + case a == PolicyList || b == PolicyList: + return PolicyList default: return "" } @@ -483,11 +485,10 @@ func (a *ACL) AllowQuotaWrite() bool { // AllowPluginRead checks if read operations are allowed for all plugins func (a *ACL) AllowPluginRead() bool { - // ACL is nil only if ACLs are disabled - if a == nil { - return true - } switch { + // ACL is nil only if 
ACLs are disabled + case a == nil: + return true case a.management: return true case a.plugin == PolicyRead: @@ -497,6 +498,23 @@ func (a *ACL) AllowPluginRead() bool { } } +// AllowPluginList checks if list operations are allowed for all plugins +func (a *ACL) AllowPluginList() bool { + switch { + // ACL is nil only if ACLs are disabled + case a == nil: + return true + case a.management: + return true + case a.plugin == PolicyList: + return true + case a.plugin == PolicyRead: + return true + default: + return false + } +} + // IsManagement checks if this represents a management token func (a *ACL) IsManagement() bool { return a.management diff --git a/acl/policy.go b/acl/policy.go index 4287fa6d6..b4925577e 100644 --- a/acl/policy.go +++ b/acl/policy.go @@ -13,6 +13,7 @@ const ( // which always takes precedence and supercedes. PolicyDeny = "deny" PolicyRead = "read" + PolicyList = "list" PolicyWrite = "write" ) @@ -22,23 +23,22 @@ const ( // combined we take the union of all capabilities. If the deny capability is present, it // takes precedence and overwrites all other capabilities. - NamespaceCapabilityDeny = "deny" - NamespaceCapabilityListJobs = "list-jobs" - NamespaceCapabilityReadJob = "read-job" - NamespaceCapabilitySubmitJob = "submit-job" - NamespaceCapabilityDispatchJob = "dispatch-job" - NamespaceCapabilityReadLogs = "read-logs" - NamespaceCapabilityReadFS = "read-fs" - NamespaceCapabilityAllocExec = "alloc-exec" - NamespaceCapabilityAllocNodeExec = "alloc-node-exec" - NamespaceCapabilityAllocLifecycle = "alloc-lifecycle" - NamespaceCapabilitySentinelOverride = "sentinel-override" - NamespaceCapabilityPrivilegedTask = "privileged-task" - NamespaceCapabilityCSIAccess = "csi-access" - NamespaceCapabilityCSIWriteVolume = "csi-write-volume" - NamespaceCapabilityCSIReadVolume = "csi-read-volume" - NamespaceCapabilityCSIListVolume = "csi-list-volume" - NamespaceCapabilityCSIMountVolume = "csi-mount-volume" + NamespaceCapabilityDeny = "deny" + NamespaceCapabilityListJobs = "list-jobs" + NamespaceCapabilityReadJob = "read-job" + NamespaceCapabilitySubmitJob = "submit-job" + NamespaceCapabilityDispatchJob = "dispatch-job" + NamespaceCapabilityReadLogs = "read-logs" + NamespaceCapabilityReadFS = "read-fs" + NamespaceCapabilityAllocExec = "alloc-exec" + NamespaceCapabilityAllocNodeExec = "alloc-node-exec" + NamespaceCapabilityAllocLifecycle = "alloc-lifecycle" + NamespaceCapabilitySentinelOverride = "sentinel-override" + NamespaceCapabilityCSIRegisterPlugin = "csi-register-plugin" + NamespaceCapabilityCSIWriteVolume = "csi-write-volume" + NamespaceCapabilityCSIReadVolume = "csi-read-volume" + NamespaceCapabilityCSIListVolume = "csi-list-volume" + NamespaceCapabilityCSIMountVolume = "csi-mount-volume" ) var ( @@ -128,6 +128,15 @@ func isPolicyValid(policy string) bool { } } +func (p *PluginPolicy) isValid() bool { + switch p.Policy { + case PolicyDeny, PolicyRead, PolicyList: + return true + default: + return false + } +} + // isNamespaceCapabilityValid ensures the given capability is valid for a namespace policy func isNamespaceCapabilityValid(cap string) bool { switch cap { @@ -135,8 +144,7 @@ func isNamespaceCapabilityValid(cap string) bool { NamespaceCapabilitySubmitJob, NamespaceCapabilityDispatchJob, NamespaceCapabilityReadLogs, NamespaceCapabilityReadFS, NamespaceCapabilityAllocLifecycle, NamespaceCapabilityAllocExec, NamespaceCapabilityAllocNodeExec, - NamespaceCapabilityCSIAccess, // TODO(langmartin): remove after plugin caps are done - NamespaceCapabilityCSIReadVolume, 
NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume: + NamespaceCapabilityCSIReadVolume, NamespaceCapabilityCSIWriteVolume, NamespaceCapabilityCSIListVolume, NamespaceCapabilityCSIMountVolume, NamespaceCapabilityCSIRegisterPlugin: return true // Separate the enterprise-only capabilities case NamespaceCapabilitySentinelOverride: @@ -282,7 +290,7 @@ func Parse(rules string) (*Policy, error) { return nil, fmt.Errorf("Invalid quota policy: %#v", p.Quota) } - if p.Plugin != nil && !isPolicyValid(p.Plugin.Policy) { + if p.Plugin != nil && !p.Plugin.isValid() { return nil, fmt.Errorf("Invalid plugin policy: %#v", p.Plugin) } return p, nil diff --git a/acl/policy_test.go b/acl/policy_test.go index 48b5a6a35..d8d21ac81 100644 --- a/acl/policy_test.go +++ b/acl/policy_test.go @@ -260,6 +260,28 @@ func TestParse(t *testing.T) { "Invalid host volume name", nil, }, + { + ` + plugin { + policy = "list" + } + `, + "", + &Policy{ + Plugin: &PluginPolicy{ + Policy: PolicyList, + }, + }, + }, + { + ` + plugin { + policy = "reader" + } + `, + "Invalid plugin policy", + nil, + }, } for idx, tc := range tcases { diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 758570bec..15d106d2c 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -398,13 +398,12 @@ func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIP return err } - allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } - if !allowCSIAccess(aclObj, args.RequestNamespace()) { + if !aclObj.AllowPluginList() { return structs.ErrPermissionDenied } @@ -430,9 +429,6 @@ func (v *CSIPlugin) List(args *structs.CSIPluginListRequest, reply *structs.CSIP } plug := raw.(*structs.CSIPlugin) - - // FIXME we should filter the ACL access for the plugin's - // namespace, but plugins don't currently have namespaces ps = append(ps, plug.Stub()) } @@ -448,16 +444,18 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu return err } - allowCSIAccess := acl.NamespaceValidator(acl.NamespaceCapabilityCSIAccess) aclObj, err := v.srv.QueryACLObj(&args.QueryOptions, false) if err != nil { return err } - if !allowCSIAccess(aclObj, args.RequestNamespace()) { + if !aclObj.AllowPluginRead() { return structs.ErrPermissionDenied } + withAllocs := aclObj == nil || + aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityReadJob) + metricsStart := time.Now() defer metrics.MeasureSince([]string{"nomad", "plugin", "get"}, metricsStart) @@ -470,15 +468,26 @@ func (v *CSIPlugin) Get(args *structs.CSIPluginGetRequest, reply *structs.CSIPlu return err } - if plug != nil { - plug, err = state.CSIPluginDenormalize(ws, plug.Copy()) - } - if err != nil { - return err + if plug == nil { + return nil } - // FIXME we should re-check the ACL access for the plugin's - // namespace, but plugins don't currently have namespaces + if withAllocs { + plug, err = state.CSIPluginDenormalize(ws, plug.Copy()) + if err != nil { + return err + } + + // Filter the allocation stubs by our namespace. 
withAllocs + // means we're allowed + var as []*structs.AllocListStub + for _, a := range plug.Allocations { + if a.Namespace == args.RequestNamespace() { + as = append(as, a) + } + } + plug.Allocations = as + } reply.Plugin = plug return v.srv.replySetIndex(csiPluginTable, &reply.QueryMeta) diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index 8548dc772..eec6ffc8a 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -490,8 +490,6 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { defer shutdown() testutil.WaitForLeader(t, srv.RPC) - ns := structs.DefaultNamespace - deleteNodes := CreateTestCSIPlugin(srv.fsm.State(), "foo") defer deleteNodes() @@ -501,8 +499,9 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { codec := rpcClient(t, srv) // Get the plugin back out - policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIAccess}) - getToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy) + listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}) + policy := mock.PluginPolicy("read") + listJob + getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy) req2 := &structs.CSIPluginGetRequest{ ID: "foo", @@ -515,6 +514,13 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { err := msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) require.NoError(t, err) + // Get requires plugin-read, not plugin-list + lPolicy := mock.PluginPolicy("list") + lTok := mock.CreatePolicyAndToken(t, state, 1003, "plugin-list", lPolicy) + req2.AuthToken = lTok.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) + require.Error(t, err, "Permission denied") + // List plugins req3 := &structs.CSIPluginListRequest{ QueryOptions: structs.QueryOptions{ @@ -532,15 +538,90 @@ func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(resp3.Plugins)) + // List allows plugin-list + req3.AuthToken = lTok.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3) + require.NoError(t, err) + require.Equal(t, 1, len(resp3.Plugins)) + // Deregistration works deleteNodes() // Plugin is missing + req2.AuthToken = getToken.SecretID err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2) require.NoError(t, err) require.Nil(t, resp2.Plugin) } +// TestCSIPluginEndpoint_ACLNamespaceAlloc checks that allocations are filtered by namespace +// when getting plugins, and enforcing that the client has job-read ACL access to the +// namespace of the allocations +func TestCSIPluginEndpoint_ACLNamespaceAlloc(t *testing.T) { + t.Parallel() + srv, shutdown := TestServer(t, func(c *Config) { + c.NumSchedulers = 0 // Prevent automatic dequeue + }) + defer shutdown() + testutil.WaitForLeader(t, srv.RPC) + state := srv.fsm.State() + + // Setup ACLs + state.BootstrapACLTokens(1, 0, mock.ACLManagementToken()) + srv.config.ACLEnabled = true + codec := rpcClient(t, srv) + listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}) + policy := mock.PluginPolicy("read") + listJob + getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy) + + // Create the plugin and then some allocations to pretend to be the allocs that are + // running the plugin tasks + deleteNodes := CreateTestCSIPlugin(srv.fsm.State(), "foo") + defer deleteNodes() + + plug, _ := 
state.CSIPluginByID(memdb.NewWatchSet(), "foo") + var allocs []*structs.Allocation + for _, info := range plug.Controllers { + a := mock.Alloc() + a.ID = info.AllocID + allocs = append(allocs, a) + } + for _, info := range plug.Nodes { + a := mock.Alloc() + a.ID = info.AllocID + allocs = append(allocs, a) + } + + require.Equal(t, 3, len(allocs)) + allocs[0].Namespace = "notTheNamespace" + + err := state.UpsertAllocs(1003, allocs) + require.NoError(t, err) + + req := &structs.CSIPluginGetRequest{ + ID: "foo", + QueryOptions: structs.QueryOptions{ + Region: "global", + AuthToken: getToken.SecretID, + }, + } + resp := &structs.CSIPluginGetResponse{} + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req, resp) + require.NoError(t, err) + require.Equal(t, 2, len(resp.Plugin.Allocations)) + + for _, a := range resp.Plugin.Allocations { + require.Equal(t, structs.DefaultNamespace, a.Namespace) + } + + p2 := mock.PluginPolicy("read") + t2 := mock.CreatePolicyAndToken(t, state, 1004, "plugin-read2", p2) + req.AuthToken = t2.SecretID + err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req, resp) + require.NoError(t, err) + require.Equal(t, 0, len(resp.Plugin.Allocations)) +} + func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) { srv, shutdown := TestServer(t, func(c *Config) {}) defer shutdown() diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 10e0417a9..a0b17323a 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -139,6 +139,12 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis return structs.ErrPermissionDenied } } + + if t.CSIPluginConfig != nil { + if !aclObj.AllowNsOp(args.RequestNamespace(), acl.NamespaceCapabilityCSIRegisterPlugin) { + return structs.ErrPermissionDenied + } + } } } diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index 4373a61f0..5954a2bc4 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -402,6 +402,16 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { return j } + newCSIPluginJob := func() *structs.Job { + j := mock.Job() + t := j.TaskGroups[0].Tasks[0] + t.CSIPluginConfig = &structs.TaskCSIPluginConfig{ + ID: "foo", + Type: "node", + } + return j + } + submitJobPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob, acl.NamespaceCapabilitySubmitJob}) submitJobToken := mock.CreatePolicyAndToken(t, s1.State(), 1001, "test-submit-job", submitJobPolicy) @@ -421,6 +431,9 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { volumesPolicyReadOnly+ volumesPolicyCSIMount) + pluginPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIRegisterPlugin}) + pluginToken := mock.CreatePolicyAndToken(t, s1.State(), 1005, "test-csi-register-plugin", submitJobPolicy+pluginPolicy) + cases := []struct { Name string Job *structs.Job @@ -463,6 +476,18 @@ func TestJobEndpoint_Register_ACL(t *testing.T) { Token: submitJobWithVolumesReadOnlyToken.SecretID, ErrExpected: false, }, + { + Name: "with a token that can submit a job, plugin rejected", + Job: newCSIPluginJob(), + Token: submitJobToken.SecretID, + ErrExpected: true, + }, + { + Name: "with a token that also has csi-register-plugin, accepted", + Job: newCSIPluginJob(), + Token: pluginToken.SecretID, + ErrExpected: false, + }, } for _, tt := range cases { diff --git a/nomad/testing.go b/nomad/testing.go index c5593eeaa..9d63bbf6f 100644 --- a/nomad/testing.go +++ b/nomad/testing.go @@ -187,12 +187,11 @@ func CreateTestCSIPlugin(s 
*state.StateStore, id string) func() { } // Install healthy plugin fingerprinting results - allocID := uuid.Generate() for _, n := range ns[1:] { n.CSINodePlugins = map[string]*structs.CSIInfo{ id: { PluginID: id, - AllocID: allocID, + AllocID: uuid.Generate(), Healthy: true, HealthDescription: "healthy", RequiresControllerPlugin: true, From 5a0bcd39d179d44f7df062a7a3553c12d5bcda4d Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 19 Mar 2020 11:27:48 -0400 Subject: [PATCH 123/126] csi: dynamically update plugin registration (#7386) Allow for faster updates to plugin status when allocations become terminal by listening for register/deregister events from the dynamic plugin registry (which in turn are triggered by the plugin supervisor hook). The deregistration function closures that we pass up to the CSI plugin manager don't properly close over the name and type of the registration, causing monolith-type plugins to deregister only one of their two plugins on alloc shutdown. Rebind plugin supervisor deregistration targets to fix that. Includes log message and comment improvements --- .../taskrunner/plugin_supervisor_hook.go | 8 +- client/dynamicplugins/registry_test.go | 24 ++++ client/pluginmanager/csimanager/instance.go | 4 +- client/pluginmanager/csimanager/manager.go | 122 +++++++++++++----- .../pluginmanager/csimanager/manager_test.go | 49 +++++++ 5 files changed, 168 insertions(+), 39 deletions(-) diff --git a/client/allocrunner/taskrunner/plugin_supervisor_hook.go b/client/allocrunner/taskrunner/plugin_supervisor_hook.go index 8db05723c..7d730ead7 100644 --- a/client/allocrunner/taskrunner/plugin_supervisor_hook.go +++ b/client/allocrunner/taskrunner/plugin_supervisor_hook.go @@ -309,10 +309,14 @@ func (h *csiPluginSupervisorHook) registerPlugin(socketPath string) (func(), err return nil, err } + // need to rebind these so that each deregistration function + // closes over its own registration + rname := reg.Name + rtype := reg.Type deregistrationFns = append(deregistrationFns, func() { - err := h.runner.dynamicRegistry.DeregisterPlugin(reg.Type, reg.Name) + err := h.runner.dynamicRegistry.DeregisterPlugin(rtype, rname) if err != nil { - h.logger.Error("failed to deregister csi plugin", "name", reg.Name, "type", reg.Type, "error", err) + h.logger.Error("failed to deregister csi plugin", "name", rname, "type", rtype, "error", err) } }) } diff --git a/client/dynamicplugins/registry_test.go b/client/dynamicplugins/registry_test.go index 3e6513d18..a2621c05f 100644 --- a/client/dynamicplugins/registry_test.go +++ b/client/dynamicplugins/registry_test.go @@ -171,6 +171,30 @@ func TestDynamicRegistry_DispensePlugin_Works(t *testing.T) { require.NoError(t, err) } +func TestDynamicRegistry_IsolatePluginTypes(t *testing.T) { + t.Parallel() + r := NewRegistry(nil, nil) + + err := r.RegisterPlugin(&PluginInfo{ + Type: PluginTypeCSIController, + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.RegisterPlugin(&PluginInfo{ + Type: PluginTypeCSINode, + Name: "my-plugin", + ConnectionInfo: &PluginConnectionInfo{}, + }) + require.NoError(t, err) + + err = r.DeregisterPlugin(PluginTypeCSIController, "my-plugin") + require.NoError(t, err) + require.Equal(t, len(r.ListPlugins(PluginTypeCSINode)), 1) + require.Equal(t, len(r.ListPlugins(PluginTypeCSIController)), 0) +} + func TestDynamicRegistry_StateStore(t *testing.T) { t.Parallel() dispenseFn := func(i *PluginInfo) (interface{}, error) { diff --git a/client/pluginmanager/csimanager/instance.go 
b/client/pluginmanager/csimanager/instance.go index 82ff914c6..4386923fc 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -86,7 +86,7 @@ func (i *instanceManager) run() { func (i *instanceManager) setupVolumeManager() { if i.info.Type != dynamicplugins.PluginTypeCSINode { - i.logger.Debug("Skipping volume manager setup - not managing a Node plugin", "type", i.info.Type) + i.logger.Debug("not a node plugin, skipping volume manager setup", "type", i.info.Type) return } @@ -95,7 +95,7 @@ func (i *instanceManager) setupVolumeManager() { return case <-i.fp.hadFirstSuccessfulFingerprintCh: i.volumeManager = newVolumeManager(i.logger, i.client, i.mountPoint, i.containerMountPoint, i.fp.requiresStaging) - i.logger.Debug("Setup volume manager") + i.logger.Debug("volume manager setup complete") close(i.volumeManagerSetupCh) return } diff --git a/client/pluginmanager/csimanager/manager.go b/client/pluginmanager/csimanager/manager.go index d79b1b339..60b6b309b 100644 --- a/client/pluginmanager/csimanager/manager.go +++ b/client/pluginmanager/csimanager/manager.go @@ -85,63 +85,113 @@ func (c *csiManager) MounterForVolume(ctx context.Context, vol *structs.CSIVolum // Run starts a plugin manager and should return early func (c *csiManager) Run() { - // Ensure we have at least one full sync before starting - c.resyncPluginsFromRegistry("csi-controller") - c.resyncPluginsFromRegistry("csi-node") go c.runLoop() } func (c *csiManager) runLoop() { - // TODO: Subscribe to the events channel from the registry to receive dynamic - // updates without a full resync - timer := time.NewTimer(0) + timer := time.NewTimer(0) // ensure we sync immediately in first pass + controllerUpdates := c.registry.PluginsUpdatedCh(c.shutdownCtx, "csi-controller") + nodeUpdates := c.registry.PluginsUpdatedCh(c.shutdownCtx, "csi-node") for { select { - case <-c.shutdownCtx.Done(): - close(c.shutdownCh) - return case <-timer.C: c.resyncPluginsFromRegistry("csi-controller") c.resyncPluginsFromRegistry("csi-node") timer.Reset(c.pluginResyncPeriod) + case event := <-controllerUpdates: + c.handlePluginEvent(event) + case event := <-nodeUpdates: + c.handlePluginEvent(event) + case <-c.shutdownCtx.Done(): + close(c.shutdownCh) + return } } } -// resyncPluginsFromRegistry does a full sync of the running instance managers -// against those in the registry. Eventually we should primarily be using -// update events from the registry, but this is an ok fallback for now. +// resyncPluginsFromRegistry does a full sync of the running instance +// managers against those in the registry. we primarily will use update +// events from the registry. func (c *csiManager) resyncPluginsFromRegistry(ptype string) { plugins := c.registry.ListPlugins(ptype) seen := make(map[string]struct{}, len(plugins)) + // For every plugin in the registry, ensure that we have an existing plugin + // running. Also build the map of valid plugin names. + // Note: monolith plugins that run as both controllers and nodes get a + // separate instance manager for both modes. + for _, plugin := range plugins { + seen[plugin.Name] = struct{}{} + c.ensureInstance(plugin) + } + + // For every instance manager, if we did not find it during the plugin + // iterator, shut it down and remove it from the table. 
+ instances := c.instancesForType(ptype) + for name, mgr := range instances { + if _, ok := seen[name]; !ok { + c.ensureNoInstance(mgr.info) + } + } +} + +// handlePluginEvent syncs a single event against the plugin registry +func (c *csiManager) handlePluginEvent(event *dynamicplugins.PluginUpdateEvent) { + if event == nil { + return + } + c.logger.Trace("dynamic plugin event", + "event", event.EventType, + "plugin_id", event.Info.Name, + "plugin_alloc_id", event.Info.AllocID) + + switch event.EventType { + case dynamicplugins.EventTypeRegistered: + c.ensureInstance(event.Info) + case dynamicplugins.EventTypeDeregistered: + c.ensureNoInstance(event.Info) + default: + c.logger.Error("received unknown dynamic plugin event type", + "type", event.EventType) + } +} + +// Ensure we have an instance manager for the plugin and add it to +// the CSI manager's tracking table for that plugin type. +func (c *csiManager) ensureInstance(plugin *dynamicplugins.PluginInfo) { + name := plugin.Name + ptype := plugin.Type + instances := c.instancesForType(ptype) + if _, ok := instances[name]; !ok { + c.logger.Debug("detected new CSI plugin", "name", name, "type", ptype) + mgr := newInstanceManager(c.logger, c.updateNodeCSIInfoFunc, plugin) + instances[name] = mgr + mgr.run() + } +} + +// Shut down the instance manager for a plugin and remove it from +// the CSI manager's tracking table for that plugin type. +func (c *csiManager) ensureNoInstance(plugin *dynamicplugins.PluginInfo) { + name := plugin.Name + ptype := plugin.Type + instances := c.instancesForType(ptype) + if mgr, ok := instances[name]; ok { + c.logger.Debug("shutting down CSI plugin", "name", name, "type", ptype) + mgr.shutdown() + delete(instances, name) + } +} + +// Get the instance managers table for a specific plugin type, +// ensuring it's been initialized if it doesn't exist. +func (c *csiManager) instancesForType(ptype string) map[string]*instanceManager { pluginMap, ok := c.instances[ptype] if !ok { pluginMap = make(map[string]*instanceManager) c.instances[ptype] = pluginMap } - - // For every plugin in the registry, ensure that we have an existing plugin - // running. Also build the map of valid plugin names. - for _, plugin := range plugins { - seen[plugin.Name] = struct{}{} - if _, ok := pluginMap[plugin.Name]; !ok { - c.logger.Debug("detected new CSI plugin", "name", plugin.Name, "type", ptype) - mgr := newInstanceManager(c.logger, c.updateNodeCSIInfoFunc, plugin) - pluginMap[plugin.Name] = mgr - mgr.run() - } - } - - // For every instance manager, if we did not find it during the plugin - // iterator, shut it down and remove it from the table. - for name, mgr := range pluginMap { - if _, ok := seen[name]; !ok { - c.logger.Info("shutting down CSI plugin", "name", name, "type", ptype) - mgr.shutdown() - delete(pluginMap, name) - } - } + return pluginMap } // Shutdown should gracefully shutdown all plugins managed by the manager. 
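Editor's sketch, not part of the patch: the event-driven resync above lets the CSI manager react as soon as a plugin registers or deregisters instead of waiting for the next periodic sweep. The program below only illustrates how registrations flow from the dynamic plugin registry to the channel the manager consumes; it assumes the dynamicplugins API exercised by the tests in this change set (NewRegistry, RegisterPlugin, DeregisterPlugin, PluginsUpdatedCh), and the plugin name is taken from those tests.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/hashicorp/nomad/client/dynamicplugins"
)

func main() {
	r := dynamicplugins.NewRegistry(nil, nil)
	defer r.Shutdown()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The CSI manager subscribes once per plugin type; registrations and
	// deregistrations arrive as events on this channel.
	updates := r.PluginsUpdatedCh(ctx, dynamicplugins.PluginTypeCSINode)
	go func() {
		for event := range updates {
			fmt.Println(event.EventType, event.Info.Type, event.Info.Name)
		}
	}()

	// Register and later deregister a node plugin. With a monolith plugin, a
	// controller registration under the same name is tracked separately, as
	// TestDynamicRegistry_IsolatePluginTypes above demonstrates.
	_ = r.RegisterPlugin(&dynamicplugins.PluginInfo{
		Type:           dynamicplugins.PluginTypeCSINode,
		Name:           "my-plugin",
		ConnectionInfo: &dynamicplugins.PluginConnectionInfo{},
	})
	_ = r.DeregisterPlugin(dynamicplugins.PluginTypeCSINode, "my-plugin")

	// give the consumer goroutine a moment to print (sketch only)
	time.Sleep(100 * time.Millisecond)
}
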
@@ -150,7 +200,9 @@ func (c *csiManager) Shutdown() { // Shut down the run loop c.shutdownCtxCancelFn() - // Wait for plugin manager shutdown to complete + // Wait for plugin manager shutdown to complete so that we + // don't try to shutdown instance managers while runLoop is + // doing a resync <-c.shutdownCh // Shutdown all the instance managers in parallel diff --git a/client/pluginmanager/csimanager/manager_test.go b/client/pluginmanager/csimanager/manager_test.go index 24b854ab5..f6c3f381d 100644 --- a/client/pluginmanager/csimanager/manager_test.go +++ b/client/pluginmanager/csimanager/manager_test.go @@ -110,3 +110,52 @@ func TestManager_DeregisterPlugin(t *testing.T) { return !ok }, 5*time.Second, 10*time.Millisecond) } + +// TestManager_MultiplePlugins ensures that multiple plugins with the same +// name but different types (as found with monolith plugins) don't interfere +// with each other. +func TestManager_MultiplePlugins(t *testing.T) { + registry := setupRegistry() + defer registry.Shutdown() + + require.NotNil(t, registry) + + cfg := &Config{ + Logger: testlog.HCLogger(t), + DynamicRegistry: registry, + UpdateNodeCSIInfoFunc: func(string, *structs.CSIInfo) {}, + PluginResyncPeriod: 500 * time.Millisecond, + } + pm := New(cfg).(*csiManager) + defer pm.Shutdown() + + require.NotNil(t, pm.registry) + + err := registry.RegisterPlugin(fakePlugin) + require.Nil(t, err) + + fakeNodePlugin := *fakePlugin + fakeNodePlugin.Type = "csi-node" + err = registry.RegisterPlugin(&fakeNodePlugin) + require.Nil(t, err) + + pm.Run() + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakeNodePlugin.Type][fakeNodePlugin.Name] + return ok + }, 5*time.Second, 10*time.Millisecond) + + err = registry.DeregisterPlugin(fakePlugin.Type, fakePlugin.Name) + require.Nil(t, err) + + require.Eventually(t, func() bool { + _, ok := pm.instances[fakePlugin.Type][fakePlugin.Name] + return !ok + }, 5*time.Second, 10*time.Millisecond) +} From 32b94bf1a47d410de367deb20c0cb58281dc3d6d Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 19 Mar 2020 17:09:49 -0400 Subject: [PATCH 124/126] csi: stub fingerprint on instance manager shutdown (#7388) Run the plugin fingerprint one last time with a closed client during instance manager shutdown. This will return quickly and will give us a correctly-populated `PluginInfo` marked as unhealthy so the Nomad client can update the server about plugin health. --- client/pluginmanager/csimanager/instance.go | 21 +++++-- .../pluginmanager/csimanager/instance_test.go | 60 +++++++++++++++++-- plugins/csi/fake/client.go | 40 +++++++++++++ 3 files changed, 112 insertions(+), 9 deletions(-) diff --git a/client/pluginmanager/csimanager/instance.go b/client/pluginmanager/csimanager/instance.go index 4386923fc..95f88221f 100644 --- a/client/pluginmanager/csimanager/instance.go +++ b/client/pluginmanager/csimanager/instance.go @@ -126,15 +126,26 @@ func (i *instanceManager) runLoop() { i.client.Close() i.client = nil } - close(i.shutdownCh) - return - case <-timer.C: - ctx, cancelFn := i.requestCtxWithTimeout(managerFingerprintInterval) + // run one last fingerprint so that we mark the plugin as unhealthy. 
+ // the client has been closed so this will return quickly with the + // plugin's basic info + ctx, cancelFn := i.requestCtxWithTimeout(time.Second) info := i.fp.fingerprint(ctx) cancelFn() - i.updater(i.info.Name, info) + if info != nil { + i.updater(i.info.Name, info) + } + close(i.shutdownCh) + return + case <-timer.C: + ctx, cancelFn := i.requestCtxWithTimeout(managerFingerprintInterval) + info := i.fp.fingerprint(ctx) + cancelFn() + if info != nil { + i.updater(i.info.Name, info) + } timer.Reset(managerFingerprintInterval) } } diff --git a/client/pluginmanager/csimanager/instance_test.go b/client/pluginmanager/csimanager/instance_test.go index c6b53043f..6a8658df5 100644 --- a/client/pluginmanager/csimanager/instance_test.go +++ b/client/pluginmanager/csimanager/instance_test.go @@ -1,11 +1,18 @@ package csimanager import ( + "context" + "fmt" + "sync" "testing" + "time" "github.com/hashicorp/nomad/client/dynamicplugins" "github.com/hashicorp/nomad/helper/testlog" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" "github.com/hashicorp/nomad/plugins/csi/fake" + "github.com/stretchr/testify/require" ) func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) { @@ -21,10 +28,55 @@ func setupTestNodeInstanceManager(t *testing.T) (*fake.Client, *instanceManager) info: pinfo, client: tp, fp: &pluginFingerprinter{ - logger: logger.Named("fingerprinter"), - info: pinfo, - client: tp, - fingerprintNode: true, + logger: logger.Named("fingerprinter"), + info: pinfo, + client: tp, + fingerprintNode: true, + hadFirstSuccessfulFingerprintCh: make(chan struct{}), }, } } + +func TestInstanceManager_Shutdown(t *testing.T) { + + var pluginHealth bool + var lock sync.Mutex + ctx, cancelFn := context.WithCancel(context.Background()) + client, im := setupTestNodeInstanceManager(t) + im.shutdownCtx = ctx + im.shutdownCtxCancelFn = cancelFn + im.shutdownCh = make(chan struct{}) + im.updater = func(_ string, info *structs.CSIInfo) { + fmt.Println(info) + lock.Lock() + defer lock.Unlock() + pluginHealth = info.Healthy + } + + // set up a mock successful fingerprint so that we can get + // a healthy plugin before shutting down + client.NextPluginGetCapabilitiesResponse = &csi.PluginCapabilitySet{} + client.NextPluginGetCapabilitiesErr = nil + client.NextNodeGetInfoResponse = &csi.NodeGetInfoResponse{NodeID: "foo"} + client.NextNodeGetInfoErr = nil + client.NextNodeGetCapabilitiesResponse = &csi.NodeCapabilitySet{} + client.NextNodeGetCapabilitiesErr = nil + client.NextPluginProbeResponse = true + + go im.runLoop() + + require.Eventually(t, func() bool { + lock.Lock() + defer lock.Unlock() + return pluginHealth + }, 1*time.Second, 10*time.Millisecond) + + cancelFn() // fires im.shutdown() + + require.Eventually(t, func() bool { + lock.Lock() + defer lock.Unlock() + return !pluginHealth + }, 1*time.Second, 10*time.Millisecond) + +} diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index b18ec6f6e..162b6bb73 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -5,6 +5,7 @@ package fake import ( "context" "errors" + "fmt" "sync" "github.com/hashicorp/nomad/plugins/base" @@ -232,5 +233,44 @@ func (c *Client) NodeUnpublishVolume(ctx context.Context, volumeID, targetPath s // Shutdown the client and ensure any connections are cleaned up. 
func (c *Client) Close() error { + + c.NextPluginInfoResponse = nil + c.NextPluginInfoErr = fmt.Errorf("closed client") + + c.NextPluginProbeResponse = false + c.NextPluginProbeErr = fmt.Errorf("closed client") + + c.NextPluginGetInfoNameResponse = "" + c.NextPluginGetInfoVersionResponse = "" + c.NextPluginGetInfoErr = fmt.Errorf("closed client") + + c.NextPluginGetCapabilitiesResponse = nil + c.NextPluginGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerGetCapabilitiesResponse = nil + c.NextControllerGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextControllerPublishVolumeResponse = nil + c.NextControllerPublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerUnpublishVolumeResponse = nil + c.NextControllerUnpublishVolumeErr = fmt.Errorf("closed client") + + c.NextControllerValidateVolumeErr = fmt.Errorf("closed client") + + c.NextNodeGetCapabilitiesResponse = nil + c.NextNodeGetCapabilitiesErr = fmt.Errorf("closed client") + + c.NextNodeGetInfoResponse = nil + c.NextNodeGetInfoErr = fmt.Errorf("closed client") + + c.NextNodeStageVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnstageVolumeErr = fmt.Errorf("closed client") + + c.NextNodePublishVolumeErr = fmt.Errorf("closed client") + + c.NextNodeUnpublishVolumeErr = fmt.Errorf("closed client") + return nil } From d1f43a5feaa1be0c8256038ada5a4738c85ae234 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 23 Mar 2020 11:02:34 -0400 Subject: [PATCH 125/126] csi: improve error messages from scheduler (#7426) --- scheduler/feasible.go | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/scheduler/feasible.go b/scheduler/feasible.go index 2b67a41f6..35ce4f57c 100644 --- a/scheduler/feasible.go +++ b/scheduler/feasible.go @@ -15,13 +15,14 @@ import ( ) const ( - FilterConstraintHostVolumes = "missing compatible host volumes" - FilterConstraintCSIPlugins = "missing CSI plugins" - FilterConstraintCSIVolumesLookupFailed = "CSI volume lookup failed" - FilterConstraintCSIVolumeNotFoundTemplate = "missing CSI Volume %s" - FilterConstraintCSIVolumeExhaustedTemplate = "CSI Volume %s has exhausted its available claims" - FilterConstraintDrivers = "missing drivers" - FilterConstraintDevices = "missing devices" + FilterConstraintHostVolumes = "missing compatible host volumes" + FilterConstraintCSIPlugins = "missing CSI plugins" + FilterConstraintCSIVolumesLookupFailed = "CSI volume lookup failed" + FilterConstraintCSIVolumeNotFoundTemplate = "missing CSI Volume %s" + FilterConstraintCSIVolumeNoReadTemplate = "CSI volume %s has exhausted its available reader claims" + FilterConstraintCSIVolumeNoWriteTemplate = "CSI volume %s has exhausted its available writer claims or is read-only" + FilterConstraintDrivers = "missing drivers" + FilterConstraintDevices = "missing devices" ) // FeasibleIterator is used to iteratively yield nodes that @@ -255,12 +256,13 @@ func (c *CSIVolumeChecker) hasPlugins(n *structs.Node) (bool, string) { if !(ok && plugin.Healthy) { return false, FilterConstraintCSIPlugins } - - if (req.ReadOnly && !vol.CanReadOnly()) || - !vol.CanWrite() { - return false, FilterConstraintCSIPlugins + if req.ReadOnly { + if !vol.CanReadOnly() { + return false, fmt.Sprintf(FilterConstraintCSIVolumeNoReadTemplate, vol.ID) + } + } else if !vol.CanWrite() { + return false, fmt.Sprintf(FilterConstraintCSIVolumeNoWriteTemplate, vol.ID) } - } return true, "" From e1004447402f1e092538d1ebf33ec676e0d11cde Mon Sep 17 00:00:00 2001 From: Lang Martin Date: Mon, 23 Mar 2020 
13:55:26 -0400 Subject: [PATCH 126/126] csi: add mount_options to volumes and volume requests (#7398) Add mount_options to both the volume definition on registration and to the volume block in the group where the volume is requested. If both are specified, the options provided in the request replace the options defined in the volume. They get passed to the NodePublishVolume, which causes the node plugin to actually mount the volume on the host. Individual tasks just mount bind into the host mounted volume (unchanged behavior). An operator can mount the same volume with different options by specifying it twice in the group context. closes #7007 * nomad/structs/volumes: add MountOptions to volume request * jobspec/test-fixtures/basic.hcl: add mount_options to volume block * jobspec/parse_test: add expected MountOptions * api/tasks: add mount_options * jobspec/parse_group: use hcl decode not mapstructure, mount_options * client/allocrunner/csi_hook: pass MountOptions through client/allocrunner/csi_hook: add a VolumeMountOptions client/allocrunner/csi_hook: drop Options client/allocrunner/csi_hook: use the structs options * client/pluginmanager/csimanager/interface: UsageOptions.MountOptions * client/pluginmanager/csimanager/volume: pass MountOptions in capabilities * plugins/csi/plugin: remove todo 7007 comment * nomad/structs/csi: MountOptions * api/csi: add options to the api for parsing, match structs * plugins/csi/plugin: move VolumeMountOptions to structs * api/csi: use specific type for mount_options * client/allocrunner/csi_hook: merge MountOptions here * rename CSIOptions to CSIMountOptions * client/allocrunner/csi_hook * client/pluginmanager/csimanager/volume * nomad/structs/csi * plugins/csi/fake/client: add PrevVolumeCapability * plugins/csi/plugin * client/pluginmanager/csimanager/volume_test: remove debugging * client/pluginmanager/csimanager/volume: fix odd merging logic * api: rename CSIOptions -> CSIMountOptions * nomad/csi_endpoint: remove a 7007 comment * command/alloc_status: show mount options in the volume list * nomad/structs/csi: include MountOptions in the volume stub * api/csi: add MountOptions to stub * command/volume_status_csi: clean up csiVolMountOption, add it * command/alloc_status: csiVolMountOption lives in volume_csi_status * command/node_status: display mount flags * nomad/structs/volumes: npe * plugins/csi/plugin: npe in ToCSIRepresentation * jobspec/parse_test: expand volume parse test cases * command/agent/job_endpoint: ApiTgToStructsTG needs MountOptions * command/volume_status_csi: copy paste error * jobspec/test-fixtures/basic: hclfmt * command/volume_status_csi: clean up csiVolMountOption --- api/csi.go | 18 +++-- api/tasks.go | 10 +-- client/allocrunner/csi_hook.go | 1 + client/pluginmanager/csimanager/interface.go | 1 + client/pluginmanager/csimanager/volume.go | 23 +++++- .../pluginmanager/csimanager/volume_test.go | 71 +++++++++++++++++-- command/agent/job_endpoint.go | 7 ++ command/alloc_status.go | 2 +- command/node_status.go | 12 ++-- command/volume_status_csi.go | 38 ++++++++++ jobspec/parse_group.go | 36 ++-------- jobspec/parse_test.go | 27 ++++++- jobspec/test-fixtures/basic.hcl | 21 +++++- nomad/csi_endpoint.go | 3 - nomad/structs/csi.go | 53 ++++++++++++++ nomad/structs/volumes.go | 15 ++-- plugins/csi/fake/client.go | 2 + plugins/csi/plugin.go | 56 ++++----------- 18 files changed, 287 insertions(+), 109 deletions(-) diff --git a/api/csi.go b/api/csi.go index df1c372d3..b78019659 100644 --- a/api/csi.go +++ b/api/csi.go @@ -81,15 +81,22 
@@ const ( CSIVolumeAccessModeMultiNodeMultiWriter CSIVolumeAccessMode = "multi-node-multi-writer" ) +type CSIMountOptions struct { + FSType string `hcl:"fs_type"` + MountFlags []string `hcl:"mount_flags"` + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` // report unexpected keys +} + // CSIVolume is used for serialization, see also nomad/structs/csi.go type CSIVolume struct { - ID string `hcl:"id"` - Name string `hcl:"name"` - ExternalID string `hcl:"external_id"` - Namespace string `hcl:"namespace"` - Topologies []*CSITopology `hcl:"topologies"` + ID string + Name string + ExternalID string `hcl:"external_id"` + Namespace string + Topologies []*CSITopology AccessMode CSIVolumeAccessMode `hcl:"access_mode"` AttachmentMode CSIVolumeAttachmentMode `hcl:"attachment_mode"` + MountOptions *CSIMountOptions `hcl:"mount_options"` // Allocations, tracking claim status ReadAllocs map[string]*Allocation @@ -151,6 +158,7 @@ type CSIVolumeListStub struct { Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions Schedulable bool PluginID string Provider string diff --git a/api/tasks.go b/api/tasks.go index 7e68fea25..2e6e64d88 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -377,10 +377,12 @@ func (m *MigrateStrategy) Copy() *MigrateStrategy { // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. type VolumeRequest struct { - Name string - Type string - Source string - ReadOnly bool `mapstructure:"read_only"` + Name string + Type string + Source string + ReadOnly bool `hcl:"read_only"` + MountOptions *CSIMountOptions `hcl:"mount_options"` + ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"` } const ( diff --git a/client/allocrunner/csi_hook.go b/client/allocrunner/csi_hook.go index 02b0e4708..ac16cbbe5 100644 --- a/client/allocrunner/csi_hook.go +++ b/client/allocrunner/csi_hook.go @@ -48,6 +48,7 @@ func (c *csiHook) Prerun() error { ReadOnly: pair.request.ReadOnly, AttachmentMode: string(pair.volume.AttachmentMode), AccessMode: string(pair.volume.AccessMode), + MountOptions: pair.request.MountOptions, } mountInfo, err := mounter.MountVolume(ctx, pair.volume, c.alloc, usageOpts, pair.publishContext) diff --git a/client/pluginmanager/csimanager/interface.go b/client/pluginmanager/csimanager/interface.go index f2458fd11..c6f97cd69 100644 --- a/client/pluginmanager/csimanager/interface.go +++ b/client/pluginmanager/csimanager/interface.go @@ -22,6 +22,7 @@ type UsageOptions struct { ReadOnly bool AttachmentMode string AccessMode string + MountOptions *structs.CSIMountOptions } // ToFS is used by a VolumeManager to construct the path to where a volume diff --git a/client/pluginmanager/csimanager/volume.go b/client/pluginmanager/csimanager/volume.go index e76edae78..6243af4b6 100644 --- a/client/pluginmanager/csimanager/volume.go +++ b/client/pluginmanager/csimanager/volume.go @@ -117,6 +117,25 @@ func (v *volumeManager) ensureAllocDir(vol *structs.CSIVolume, alloc *structs.Al return allocPath, !isNotMount, nil } +func volumeCapability(vol *structs.CSIVolume, usage *UsageOptions) (*csi.VolumeCapability, error) { + capability, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) + if err != nil { + return nil, err + } + + var opts *structs.CSIMountOptions + if vol.MountOptions == nil { + opts = usage.MountOptions + } else { + opts = vol.MountOptions.Copy() + opts.Merge(usage.MountOptions) + } + + capability.MountVolume = opts + + return capability, nil +} + // stageVolume 
prepares a volume for use by allocations. When a plugin exposes // the STAGE_UNSTAGE_VOLUME capability it MUST be called once-per-volume for a // given usage mode before the volume can be NodePublish-ed. @@ -136,7 +155,7 @@ func (v *volumeManager) stageVolume(ctx context.Context, vol *structs.CSIVolume, return nil } - capability, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) + capability, err := volumeCapability(vol, usage) if err != nil { return err } @@ -175,7 +194,7 @@ func (v *volumeManager) publishVolume(ctx context.Context, vol *structs.CSIVolum return &MountInfo{Source: hostTargetPath}, nil } - capabilities, err := csi.VolumeCapabilityFromStructs(vol.AttachmentMode, vol.AccessMode) + capabilities, err := volumeCapability(vol, usage) if err != nil { return nil, err } diff --git a/client/pluginmanager/csimanager/volume_test.go b/client/pluginmanager/csimanager/volume_test.go index 148c55249..93aa2447a 100644 --- a/client/pluginmanager/csimanager/volume_test.go +++ b/client/pluginmanager/csimanager/volume_test.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/plugins/csi" csifake "github.com/hashicorp/nomad/plugins/csi/fake" "github.com/stretchr/testify/require" ) @@ -247,13 +248,14 @@ func TestVolumeManager_unstageVolume(t *testing.T) { func TestVolumeManager_publishVolume(t *testing.T) { t.Parallel() cases := []struct { - Name string - Allocation *structs.Allocation - Volume *structs.CSIVolume - UsageOptions *UsageOptions - PluginErr error - ExpectedErr error - ExpectedCSICallCount int64 + Name string + Allocation *structs.Allocation + Volume *structs.CSIVolume + UsageOptions *UsageOptions + PluginErr error + ExpectedErr error + ExpectedCSICallCount int64 + ExpectedVolumeCapability *csi.VolumeCapability }{ { Name: "Returns an error when the plugin returns an error", @@ -281,6 +283,56 @@ func TestVolumeManager_publishVolume(t *testing.T) { ExpectedErr: nil, ExpectedCSICallCount: 1, }, + { + Name: "Mount options in the volume", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + UsageOptions: &UsageOptions{}, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + ExpectedVolumeCapability: &csi.VolumeCapability{ + AccessType: csi.VolumeAccessTypeMount, + AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter, + MountVolume: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + }, + { + Name: "Mount options override in the request", + Allocation: structs.MockAlloc(), + Volume: &structs.CSIVolume{ + ID: "foo", + AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem, + AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter, + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"ro"}, + }, + }, + UsageOptions: &UsageOptions{ + MountOptions: &structs.CSIMountOptions{ + MountFlags: []string{"rw"}, + }, + }, + PluginErr: nil, + ExpectedErr: nil, + ExpectedCSICallCount: 1, + ExpectedVolumeCapability: &csi.VolumeCapability{ + AccessType: csi.VolumeAccessTypeMount, + AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter, + MountVolume: &structs.CSIMountOptions{ + MountFlags: []string{"rw"}, + }, + }, + }, } for _, tc := range cases { @@ -303,6 +355,11 @@ func TestVolumeManager_publishVolume(t 
*testing.T) { } require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount) + + if tc.ExpectedVolumeCapability != nil { + require.Equal(t, tc.ExpectedVolumeCapability, csiFake.PrevVolumeCapability) + } + }) } } diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go index edba2a39c..b76329c56 100644 --- a/command/agent/job_endpoint.go +++ b/command/agent/job_endpoint.go @@ -762,6 +762,13 @@ func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) { Source: v.Source, } + if v.MountOptions != nil { + vol.MountOptions = &structs.CSIMountOptions{ + FSType: v.MountOptions.FSType, + MountFlags: v.MountOptions.MountFlags, + } + } + tg.Volumes[k] = vol } } diff --git a/command/alloc_status.go b/command/alloc_status.go index 666e468a3..b9479aefd 100644 --- a/command/alloc_status.go +++ b/command/alloc_status.go @@ -781,7 +781,7 @@ FOUND: vol.Provider, vol.Schedulable, volReq.ReadOnly, - "n/a", // TODO(tgross): https://github.com/hashicorp/nomad/issues/7007 + csiVolMountOption(vol.MountOptions, volReq.MountOptions), )) } else { csiVolumesOutput = append(csiVolumesOutput, diff --git a/command/node_status.go b/command/node_status.go index 307d55028..2651c4375 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -541,7 +541,7 @@ func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *ap // Duplicate nodeCSIVolumeNames to sort by name but also index volume names to ids var names []string - volNames := map[string]string{} + requests := map[string]*api.VolumeRequest{} for _, alloc := range runningAllocs { tg := alloc.GetTaskGroup() if tg == nil || len(tg.Volumes) == 0 { @@ -550,7 +550,7 @@ func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *ap for _, v := range tg.Volumes { names = append(names, v.Name) - volNames[v.Source] = v.Name + requests[v.Source] = v } } if len(names) == 0 { @@ -563,23 +563,25 @@ func (c *NodeStatusCommand) outputNodeCSIVolumeInfo(client *api.Client, node *ap volumes := map[string]*api.CSIVolumeListStub{} vs, _ := client.Nodes().CSIVolumes(node.ID, nil) for _, v := range vs { - n := volNames[v.ID] + n := requests[v.ID].Name volumes[n] = v } // Output the volumes in name order output := make([]string, 0, len(names)+1) - output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode") + output = append(output, "ID|Name|Plugin ID|Schedulable|Provider|Access Mode|Mount Options") for _, name := range names { v := volumes[name] + r := requests[v.ID] output = append(output, fmt.Sprintf( - "%s|%s|%s|%t|%s|%s", + "%s|%s|%s|%t|%s|%s|%s", v.ID, name, v.PluginID, v.Schedulable, v.Provider, v.AccessMode, + csiVolMountOption(v.MountOptions, r.MountOptions), )) } diff --git a/command/volume_status_csi.go b/command/volume_status_csi.go index 4838c009c..95f6883bf 100644 --- a/command/volume_status_csi.go +++ b/command/volume_status_csi.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/nomad/structs" ) func (c *VolumeStatusCommand) csiBanner() { @@ -105,6 +106,7 @@ func (c *VolumeStatusCommand) formatBasic(vol *api.CSIVolume) (string, error) { fmt.Sprintf("Access Mode|%s", vol.AccessMode), fmt.Sprintf("Attachment Mode|%s", vol.AttachmentMode), + fmt.Sprintf("Mount Options|%s", csiVolMountOption(vol.MountOptions, nil)), fmt.Sprintf("Namespace|%s", vol.Namespace), } @@ -151,3 +153,39 @@ func (c *VolumeStatusCommand) formatTopologies(vol *api.CSIVolume) string { return strings.Join(out, "\n") } + +func csiVolMountOption(volume, 
request *api.CSIMountOptions) string { + var req, opts *structs.CSIMountOptions + + if request != nil { + req = &structs.CSIMountOptions{ + FSType: request.FSType, + MountFlags: request.MountFlags, + } + } + + if volume == nil { + opts = req + } else { + opts = &structs.CSIMountOptions{ + FSType: volume.FSType, + MountFlags: volume.MountFlags, + } + opts.Merge(req) + } + + if opts == nil { + return "" + } + + var out string + if opts.FSType != "" { + out = fmt.Sprintf("fs_type: %s", opts.FSType) + } + + if len(opts.MountFlags) > 0 { + out = fmt.Sprintf("%s flags: %s", out, strings.Join(opts.MountFlags, ", ")) + } + + return out +} diff --git a/jobspec/parse_group.go b/jobspec/parse_group.go index 062c96074..86c078658 100644 --- a/jobspec/parse_group.go +++ b/jobspec/parse_group.go @@ -295,41 +295,17 @@ func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error { } func parseVolumes(out *map[string]*api.VolumeRequest, list *ast.ObjectList) error { - volumes := make(map[string]*api.VolumeRequest, len(list.Items)) + hcl.DecodeObject(out, list) - for _, item := range list.Items { - n := item.Keys[0].Token.Value().(string) - valid := []string{ - "type", - "read_only", - "hidden", - "source", - } - if err := helper.CheckHCLKeys(item.Val, valid); err != nil { - return err - } - - var m map[string]interface{} - if err := hcl.DecodeObject(&m, item.Val); err != nil { - return err - } - - var result api.VolumeRequest - dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - WeaklyTypedInput: true, - Result: &result, - }) + for k, v := range *out { + err := helper.UnusedKeys(v) if err != nil { return err } - if err := dec.Decode(m); err != nil { - return err - } - - result.Name = n - volumes[n] = &result + // This is supported by `hcl:",key"`, but that only works if we start at the + // parent ast.ObjectItem + v.Name = k } - *out = volumes return nil } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index ed66f05ae..924d3ab71 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -117,11 +117,32 @@ func TestParse(t *testing.T) { Operand: "=", }, }, - Volumes: map[string]*api.VolumeRequest{ "foo": { - Name: "foo", - Type: "host", + Name: "foo", + Type: "host", + Source: "/path", + ExtraKeysHCL: nil, + }, + "bar": { + Name: "bar", + Type: "csi", + Source: "bar-vol", + MountOptions: &api.CSIMountOptions{ + FSType: "ext4", + }, + ExtraKeysHCL: nil, + }, + "baz": { + Name: "baz", + Type: "csi", + Source: "bar-vol", + MountOptions: &api.CSIMountOptions{ + MountFlags: []string{ + "ro", + }, + }, + ExtraKeysHCL: nil, }, }, Affinities: []*api.Affinity{ diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index b5068a1c4..ba826bdc0 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -71,7 +71,26 @@ job "binstore-storagelocker" { count = 5 volume "foo" { - type = "host" + type = "host" + source = "/path" + } + + volume "bar" { + type = "csi" + source = "bar-vol" + + mount_options { + fs_type = "ext4" + } + } + + volume "baz" { + type = "csi" + source = "bar-vol" + + mount_options { + mount_flags = ["ro"] + } } restart { diff --git a/nomad/csi_endpoint.go b/nomad/csi_endpoint.go index 15d106d2c..b0968c615 100644 --- a/nomad/csi_endpoint.go +++ b/nomad/csi_endpoint.go @@ -551,9 +551,6 @@ func (srv *Server) controllerPublishVolume(req *structs.CSIVolumeClaimRequest, r AttachmentMode: vol.AttachmentMode, AccessMode: vol.AccessMode, ReadOnly: req.Claim == structs.CSIVolumeClaimRead, - // TODO(tgross): we 
don't have a way of setting these yet. - // ref https://github.com/hashicorp/nomad/issues/7007 - // MountOptions: vol.MountOptions, } cReq.PluginID = plug.ID cReq.ControllerNodeID = nodeID diff --git a/nomad/structs/csi.go b/nomad/structs/csi.go index 704fece53..bbd19b99a 100644 --- a/nomad/structs/csi.go +++ b/nomad/structs/csi.go @@ -135,6 +135,56 @@ func ValidCSIVolumeWriteAccessMode(accessMode CSIVolumeAccessMode) bool { } } +// CSIMountOptions contain optional additional configuration that can be used +// when specifying that a Volume should be used with VolumeAccessTypeMount. +type CSIMountOptions struct { + // FSType is an optional field that allows an operator to specify the type + // of the filesystem. + FSType string + + // MountFlags contains additional options that may be used when mounting the + // volume by the plugin. This may contain sensitive data and should not be + // leaked. + MountFlags []string +} + +func (o *CSIMountOptions) Copy() *CSIMountOptions { + if o == nil { + return nil + } + return &(*o) +} + +func (o *CSIMountOptions) Merge(p *CSIMountOptions) { + if p == nil { + return + } + if p.FSType != "" { + o.FSType = p.FSType + } + if p.MountFlags != nil { + o.MountFlags = p.MountFlags + } +} + +// VolumeMountOptions implements the Stringer and GoStringer interfaces to prevent +// accidental leakage of sensitive mount flags via logs. +var _ fmt.Stringer = &CSIMountOptions{} +var _ fmt.GoStringer = &CSIMountOptions{} + +func (v *CSIMountOptions) String() string { + mountFlagsString := "nil" + if len(v.MountFlags) != 0 { + mountFlagsString = "[REDACTED]" + } + + return fmt.Sprintf("csi.CSIOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString) +} + +func (v *CSIMountOptions) GoString() string { + return v.String() +} + // CSIVolume is the full representation of a CSI Volume type CSIVolume struct { // ID is a namespace unique URL safe identifier for the volume @@ -147,6 +197,7 @@ type CSIVolume struct { Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions // Allocations, tracking claim status ReadAllocs map[string]*Allocation @@ -178,6 +229,7 @@ type CSIVolListStub struct { Topologies []*CSITopology AccessMode CSIVolumeAccessMode AttachmentMode CSIVolumeAttachmentMode + MountOptions *CSIMountOptions CurrentReaders int CurrentWriters int Schedulable bool @@ -228,6 +280,7 @@ func (v *CSIVolume) Stub() *CSIVolListStub { Topologies: v.Topologies, AccessMode: v.AccessMode, AttachmentMode: v.AttachmentMode, + MountOptions: v.MountOptions, CurrentReaders: len(v.ReadAllocs), CurrentWriters: len(v.WriteAllocs), Schedulable: v.Schedulable, diff --git a/nomad/structs/volumes.go b/nomad/structs/volumes.go index fe44e4830..e29d1c42b 100644 --- a/nomad/structs/volumes.go +++ b/nomad/structs/volumes.go @@ -86,10 +86,11 @@ func HostVolumeSliceMerge(a, b []*ClientHostVolumeConfig) []*ClientHostVolumeCon // VolumeRequest is a representation of a storage volume that a TaskGroup wishes to use. 
type VolumeRequest struct { - Name string - Type string - Source string - ReadOnly bool + Name string + Type string + Source string + ReadOnly bool + MountOptions *CSIMountOptions } func (v *VolumeRequest) Copy() *VolumeRequest { @@ -99,6 +100,12 @@ func (v *VolumeRequest) Copy() *VolumeRequest { nv := new(VolumeRequest) *nv = *v + if v.MountOptions == nil { + return nv + } + + nv.MountOptions = &(*v.MountOptions) + return nv } diff --git a/plugins/csi/fake/client.go b/plugins/csi/fake/client.go index 162b6bb73..b971ce260 100644 --- a/plugins/csi/fake/client.go +++ b/plugins/csi/fake/client.go @@ -67,6 +67,7 @@ type Client struct { NextNodeUnstageVolumeErr error NodeUnstageVolumeCallCount int64 + PrevVolumeCapability *csi.VolumeCapability NextNodePublishVolumeErr error NodePublishVolumeCallCount int64 @@ -217,6 +218,7 @@ func (c *Client) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolu c.Mu.Lock() defer c.Mu.Unlock() + c.PrevVolumeCapability = req.VolumeCapability c.NodePublishVolumeCallCount++ return c.NextNodePublishVolumeErr diff --git a/plugins/csi/plugin.go b/plugins/csi/plugin.go index 97e463dee..345b9f753 100644 --- a/plugins/csi/plugin.go +++ b/plugins/csi/plugin.go @@ -357,42 +357,13 @@ func (v VolumeAccessType) String() string { } } -// VolumeMountOptions contain optional additional configuration that can be used -// when specifying that a Volume should be used with VolumeAccessTypeMount. -type VolumeMountOptions struct { - // FSType is an optional field that allows an operator to specify the type - // of the filesystem. - FSType string - - // MountFlags contains additional options that may be used when mounting the - // volume by the plugin. This may contain sensitive data and should not be - // leaked. - MountFlags []string -} - -// VolumeMountOptions implements the Stringer and GoStringer interfaces to prevent -// accidental leakage of sensitive mount flags via logs. -var _ fmt.Stringer = &VolumeMountOptions{} -var _ fmt.GoStringer = &VolumeMountOptions{} - -func (v *VolumeMountOptions) String() string { - mountFlagsString := "nil" - if len(v.MountFlags) != 0 { - mountFlagsString = "[REDACTED]" - } - - return fmt.Sprintf("csi.VolumeMountOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString) -} - -func (v *VolumeMountOptions) GoString() string { - return v.String() -} - // VolumeCapability describes the overall usage requirements for a given CSI Volume type VolumeCapability struct { - AccessType VolumeAccessType - AccessMode VolumeAccessMode - VolumeMountOptions *VolumeMountOptions + AccessType VolumeAccessType + AccessMode VolumeAccessMode + + // Indicate that the volume will be accessed via the filesystem API. 
+ MountVolume *structs.CSIMountOptions } func VolumeCapabilityFromStructs(sAccessType structs.CSIVolumeAttachmentMode, sAccessMode structs.CSIVolumeAccessMode) (*VolumeCapability, error) { @@ -431,11 +402,8 @@ func VolumeCapabilityFromStructs(sAccessType structs.CSIVolumeAttachmentMode, sA } return &VolumeCapability{ - AccessType: accessType, - AccessMode: accessMode, - VolumeMountOptions: &VolumeMountOptions{ - // GH-7007: Currently we have no way to provide these - }, + AccessType: accessType, + AccessMode: accessMode, }, nil } @@ -451,12 +419,12 @@ func (c *VolumeCapability) ToCSIRepresentation() *csipbv1.VolumeCapability { } if c.AccessType == VolumeAccessTypeMount { - vc.AccessType = &csipbv1.VolumeCapability_Mount{ - Mount: &csipbv1.VolumeCapability_MountVolume{ - FsType: c.VolumeMountOptions.FSType, - MountFlags: c.VolumeMountOptions.MountFlags, - }, + opts := &csipbv1.VolumeCapability_MountVolume{} + if c.MountVolume != nil { + opts.FsType = c.MountVolume.FSType + opts.MountFlags = c.MountVolume.MountFlags } + vc.AccessType = &csipbv1.VolumeCapability_Mount{Mount: opts} } else { vc.AccessType = &csipbv1.VolumeCapability_Block{Block: &csipbv1.VolumeCapability_BlockVolume{}} }
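The precedence described in the commit message above (per field, options in the group's volume block override options registered on the volume) is easiest to see in isolation. The following is a minimal, standalone Go sketch of that merge; CSIMountOptions and Merge are local copies modeled on the nomad/structs additions in this patch, and the ext4/noatime/ro values are made up for illustration.

package main

import "fmt"

// CSIMountOptions mirrors the type added to nomad/structs/csi.go in this
// patch: FSType selects the filesystem, MountFlags carries extra options.
type CSIMountOptions struct {
	FSType     string
	MountFlags []string
}

// Merge overlays non-empty fields of p onto o, matching the field-by-field
// replacement the patch performs when both the volume and the request set
// mount options.
func (o *CSIMountOptions) Merge(p *CSIMountOptions) {
	if p == nil {
		return
	}
	if p.FSType != "" {
		o.FSType = p.FSType
	}
	if p.MountFlags != nil {
		o.MountFlags = p.MountFlags
	}
}

func main() {
	// Options registered with the volume act as defaults.
	volume := &CSIMountOptions{FSType: "ext4", MountFlags: []string{"noatime"}}
	// Options from the volume block in the group override them.
	request := &CSIMountOptions{MountFlags: []string{"ro"}}

	// Work on a copy so the registered volume is left untouched, then let
	// the request win field by field.
	merged := *volume
	merged.Merge(request)

	fmt.Printf("fs_type: %s, flags: %v\n", merged.FSType, merged.MountFlags)
	// Prints: fs_type: ext4, flags: [ro]
}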
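Downstream, the merged options only take effect once they are copied into the VolumeCapability sent to the plugin on NodeStageVolume and NodePublishVolume. The sketch below shows, assuming the vendored CSI protobuf bindings are used directly, roughly how a filesystem ("mount") capability carries fs_type and mount_flags; the helper name toMountCapability and the hard-coded multi-node-multi-writer access mode are illustrative, not part of the patch.

package main

import (
	"fmt"

	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
)

// toMountCapability builds a CSI VolumeCapability for a filesystem ("mount")
// volume, carrying the merged fs_type and mount_flags in the same place
// ToCSIRepresentation puts them after this patch.
func toMountCapability(fsType string, flags []string) *csipbv1.VolumeCapability {
	return &csipbv1.VolumeCapability{
		AccessType: &csipbv1.VolumeCapability_Mount{
			Mount: &csipbv1.VolumeCapability_MountVolume{
				FsType:     fsType,
				MountFlags: flags,
			},
		},
		AccessMode: &csipbv1.VolumeCapability_AccessMode{
			Mode: csipbv1.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
		},
	}
}

func main() {
	vc := toMountCapability("ext4", []string{"ro"})
	// Mount flags may be sensitive; printing them here is for the sketch
	// only. Production code should redact them as the patch does.
	fmt.Println(vc.GetMount().GetFsType(), vc.GetMount().GetMountFlags())
}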
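Finally, because mount flags may carry sensitive values (credentials passed as mount options, for example), the new CSIMountOptions type implements fmt.Stringer and fmt.GoStringer so that accidental logging redacts the flags. Below is a small standalone illustration of that behaviour, reproducing only the String method from nomad/structs/csi.go on a local stand-in type; the credential-looking flag is invented for the example.

package main

import "fmt"

// mountOptions stands in for structs.CSIMountOptions; only the String
// method added by the patch is reproduced here.
type mountOptions struct {
	FSType     string
	MountFlags []string
}

func (v *mountOptions) String() string {
	mountFlagsString := "nil"
	if len(v.MountFlags) != 0 {
		mountFlagsString = "[REDACTED]"
	}
	return fmt.Sprintf("csi.CSIOptions(FSType: %s, MountFlags: %s)", v.FSType, mountFlagsString)
}

func main() {
	// The flag value is invented; it only demonstrates that secrets passed
	// as mount flags never reach the formatted output.
	opts := &mountOptions{FSType: "ext4", MountFlags: []string{"password=example"}}
	fmt.Println(opts)
	// Prints: csi.CSIOptions(FSType: ext4, MountFlags: [REDACTED])
}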