2019-10-16 20:53:39 +00:00
|
|
|
package nomad
|
|
|
|
|
|
|
|
import (
|
2020-02-04 13:00:00 +00:00
|
|
|
"fmt"
|
2019-10-16 20:53:39 +00:00
|
|
|
"testing"
|
|
|
|
|
|
|
|
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
|
|
|
|
"github.com/hashicorp/nomad/acl"
|
2020-01-16 15:40:40 +00:00
|
|
|
"github.com/hashicorp/nomad/helper/uuid"
|
2019-10-16 20:53:39 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/mock"
|
2020-03-26 21:07:18 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/state"
|
2019-10-16 20:53:39 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
"github.com/hashicorp/nomad/testutil"
|
|
|
|
"github.com/stretchr/testify/require"
|
|
|
|
)
|
|
|
|
|
|
|
|
func TestCSIVolumeEndpoint_Get(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
srv, shutdown := TestServer(t, func(c *Config) {
|
|
|
|
c.NumSchedulers = 0 // Prevent automatic dequeue
|
|
|
|
})
|
|
|
|
defer shutdown()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
ns := structs.DefaultNamespace
|
|
|
|
|
2020-02-11 11:41:18 +00:00
|
|
|
state := srv.fsm.State()
|
|
|
|
|
|
|
|
codec := rpcClient(t, srv)
|
|
|
|
|
|
|
|
id0 := uuid.Generate()
|
|
|
|
|
|
|
|
// Create the volume
|
|
|
|
vols := []*structs.CSIVolume{{
|
|
|
|
ID: id0,
|
|
|
|
Namespace: ns,
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
PluginID: "minnie",
|
|
|
|
}}
|
|
|
|
err := state.CSIVolumeRegister(999, vols)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Create the register request
|
|
|
|
req := &structs.CSIVolumeGetRequest{
|
|
|
|
ID: id0,
|
|
|
|
QueryOptions: structs.QueryOptions{
|
|
|
|
Region: "global",
|
|
|
|
Namespace: ns,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var resp structs.CSIVolumeGetResponse
|
|
|
|
err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, uint64(999), resp.Index)
|
|
|
|
require.Equal(t, vols[0].ID, resp.Volume.ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestCSIVolumeEndpoint_Get_ACL verifies that CSIVolume.Get succeeds when
// ACLs are enforced and the caller presents a token whose policy grants the
// csi-read-volume capability on the volume's namespace.
func TestCSIVolumeEndpoint_Get_ACL(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	ns := structs.DefaultNamespace

	state := srv.fsm.State()
	// Bootstrap ACLs, then turn on enforcement so the AuthToken below is
	// actually required. NOTE(review): BootstrapACLTokens' error return is
	// discarded here — presumably safe in a fresh test server; confirm.
	state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
	srv.config.ACLEnabled = true
	// A token carrying only csi-read-volume in this namespace.
	policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIReadVolume})
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "csi-access", policy)

	codec := rpcClient(t, srv)

	id0 := uuid.Generate()

	// Create the volume
	vols := []*structs.CSIVolume{{
		ID:             id0,
		Namespace:      ns,
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
		PluginID:       "minnie",
	}}
	err := state.CSIVolumeRegister(999, vols)
	require.NoError(t, err)

	// Create the register request, authenticated with the read-capable token.
	req := &structs.CSIVolumeGetRequest{
		ID: id0,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: ns,
			AuthToken: validToken.SecretID,
		},
	}

	var resp structs.CSIVolumeGetResponse
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, &resp)
	require.NoError(t, err)
	// Index mirrors the raft index (999) used in CSIVolumeRegister above.
	require.Equal(t, uint64(999), resp.Index)
	require.Equal(t, vols[0].ID, resp.Volume.ID)
}
|
|
|
|
|
|
|
|
// TestCSIVolumeEndpoint_Register exercises the full volume lifecycle through
// RPC: Register creates the volume, a second Register with a changed PluginID
// is rejected, and Deregister removes it so a later Get returns no volume.
func TestCSIVolumeEndpoint_Register(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	ns := structs.DefaultNamespace

	state := srv.fsm.State()
	codec := rpcClient(t, srv)

	id0 := uuid.Generate()

	// Create the node and plugin
	node := mock.Node()
	node.CSINodePlugins = map[string]*structs.CSIInfo{
		"minnie": {PluginID: "minnie",
			Healthy: true,
			// Registers as node plugin that does not require a controller to skip
			// the client RPC during registration.
			NodeInfo: &structs.CSINodeInfo{},
		},
	}
	require.NoError(t, state.UpsertNode(1000, node))

	// Create the volume
	// NOTE(review): the volume's Namespace differs from the request's
	// namespace ("notTheNamespace" vs ns) — presumably the endpoint
	// normalizes it from the WriteRequest; confirm against the RPC handler.
	vols := []*structs.CSIVolume{{
		ID:             id0,
		Namespace:      "notTheNamespace",
		PluginID:       "minnie",
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeReader,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
	}}

	// Create the register request
	req1 := &structs.CSIVolumeRegisterRequest{
		Volumes: vols,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: ns,
		},
	}
	resp1 := &structs.CSIVolumeRegisterResponse{}
	err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1)
	require.NoError(t, err)
	require.NotEqual(t, uint64(0), resp1.Index)

	// Get the volume back out
	req2 := &structs.CSIVolumeGetRequest{
		ID: id0,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}
	resp2 := &structs.CSIVolumeGetResponse{}
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2)
	require.NoError(t, err)
	// The Get response should reflect the same raft index as the write.
	require.Equal(t, resp1.Index, resp2.Index)
	require.Equal(t, vols[0].ID, resp2.Volume.ID)

	// Registration does not update an existing volume: changing the
	// PluginID on re-register must be rejected.
	req1.Volumes[0].PluginID = "adam"
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register", req1, resp1)
	require.Error(t, err, "exists")

	// Deregistration works
	req3 := &structs.CSIVolumeDeregisterRequest{
		VolumeIDs: []string{id0},
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: ns,
		},
	}
	resp3 := &structs.CSIVolumeDeregisterResponse{}
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Deregister", req3, resp3)
	require.NoError(t, err)

	// Volume is missing: Get succeeds but returns a nil Volume.
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req2, resp2)
	require.NoError(t, err)
	require.Nil(t, resp2.Volume)
}
|
|
|
|
|
2020-02-04 13:00:00 +00:00
|
|
|
// TestCSIVolumeEndpoint_Claim exercises the VolumeClaim RPC, verifying that claims
// are honored only if the volume exists, the mode is permitted, and the volume
// is schedulable according to its count of claims.
func TestCSIVolumeEndpoint_Claim(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	state := srv.fsm.State()
	codec := rpcClient(t, srv)
	id0 := uuid.Generate()
	alloc := mock.BatchAlloc()

	// Create an initial volume claim request; we expect it to fail
	// because there's no such volume yet.
	claimReq := &structs.CSIVolumeClaimRequest{
		VolumeID:     id0,
		AllocationID: alloc.ID,
		Claim:        structs.CSIVolumeClaimWrite,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: structs.DefaultNamespace,
		},
	}
	claimResp := &structs.CSIVolumeClaimResponse{}
	err := msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
	require.EqualError(t, err, fmt.Sprintf("controller publish: volume not found: %s", id0),
		"expected 'volume not found' error because volume hasn't yet been created")

	// Create a client node, plugin, alloc, and volume
	node := mock.Node()
	node.CSINodePlugins = map[string]*structs.CSIInfo{
		"minnie": {
			PluginID: "minnie",
			Healthy:  true,
			NodeInfo: &structs.CSINodeInfo{},
		},
	}
	err = state.UpsertNode(1002, node)
	require.NoError(t, err)

	vols := []*structs.CSIVolume{{
		ID:             id0,
		Namespace:      structs.DefaultNamespace,
		PluginID:       "minnie",
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
		Topologies: []*structs.CSITopology{{
			Segments: map[string]string{"foo": "bar"},
		}},
	}}
	err = state.CSIVolumeRegister(1003, vols)
	require.NoError(t, err)

	// Verify that the volume exists, and is healthy
	volGetReq := &structs.CSIVolumeGetRequest{
		ID: id0,
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: structs.DefaultNamespace,
		},
	}
	volGetResp := &structs.CSIVolumeGetResponse{}
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp)
	require.NoError(t, err)
	require.Equal(t, id0, volGetResp.Volume.ID)
	require.True(t, volGetResp.Volume.Schedulable)
	require.Len(t, volGetResp.Volume.ReadAllocs, 0)
	require.Len(t, volGetResp.Volume.WriteAllocs, 0)

	// Upsert the job and alloc
	alloc.NodeID = node.ID
	summary := mock.JobSummary(alloc.JobID)
	require.NoError(t, state.UpsertJobSummary(1004, summary))
	require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc}))

	// Now our claim should succeed
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
	require.NoError(t, err)

	// Verify the claim was set: one writer, no readers yet.
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp)
	require.NoError(t, err)
	require.Equal(t, id0, volGetResp.Volume.ID)
	require.Len(t, volGetResp.Volume.ReadAllocs, 0)
	require.Len(t, volGetResp.Volume.WriteAllocs, 1)

	// Make another writer claim for a different job
	alloc2 := mock.Alloc()
	alloc2.JobID = uuid.Generate()
	summary = mock.JobSummary(alloc2.JobID)
	// NOTE(review): index 1005 is reused from the earlier UpsertAllocs call
	// above — presumably harmless for this test; confirm intentional.
	require.NoError(t, state.UpsertJobSummary(1005, summary))
	require.NoError(t, state.UpsertAllocs(1006, []*structs.Allocation{alloc2}))
	claimReq.AllocationID = alloc2.ID
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
	require.EqualError(t, err, "volume max claim reached",
		"expected 'volume max claim reached' because we only allow 1 writer")

	// Fix the mode and our claim will succeed
	claimReq.Claim = structs.CSIVolumeClaimRead
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
	require.NoError(t, err)

	// Verify the new claim was set: now one reader and one writer.
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp)
	require.NoError(t, err)
	require.Equal(t, id0, volGetResp.Volume.ID)
	require.Len(t, volGetResp.Volume.ReadAllocs, 1)
	require.Len(t, volGetResp.Volume.WriteAllocs, 1)

	// Claim is idempotent: repeating the same read claim changes nothing.
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
	require.NoError(t, err)
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", volGetReq, volGetResp)
	require.NoError(t, err)
	require.Equal(t, id0, volGetResp.Volume.ID)
	require.Len(t, volGetResp.Volume.ReadAllocs, 1)
	require.Len(t, volGetResp.Volume.WriteAllocs, 1)
}
|
|
|
|
|
2020-02-13 15:18:55 +00:00
|
|
|
// TestCSIVolumeEndpoint_ClaimWithController exercises the VolumeClaim RPC
|
|
|
|
// when a controller is required.
|
|
|
|
func TestCSIVolumeEndpoint_ClaimWithController(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
srv, shutdown := TestServer(t, func(c *Config) {
|
|
|
|
c.ACLEnabled = true
|
|
|
|
c.NumSchedulers = 0 // Prevent automatic dequeue
|
|
|
|
})
|
|
|
|
defer shutdown()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
ns := structs.DefaultNamespace
|
|
|
|
state := srv.fsm.State()
|
|
|
|
state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
|
2020-03-17 21:32:39 +00:00
|
|
|
|
|
|
|
policy := mock.NamespacePolicy(ns, "", []string{acl.NamespaceCapabilityCSIMountVolume}) +
|
|
|
|
mock.PluginPolicy("read")
|
|
|
|
accessToken := mock.CreatePolicyAndToken(t, state, 1001, "claim", policy)
|
|
|
|
|
2020-02-13 15:18:55 +00:00
|
|
|
codec := rpcClient(t, srv)
|
|
|
|
id0 := uuid.Generate()
|
|
|
|
|
2020-02-17 12:50:37 +00:00
|
|
|
// Create a client node, plugin, alloc, and volume
|
2020-02-13 15:18:55 +00:00
|
|
|
node := mock.Node()
|
|
|
|
node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version
|
|
|
|
node.CSIControllerPlugins = map[string]*structs.CSIInfo{
|
2020-03-10 14:22:42 +00:00
|
|
|
"minnie": {
|
|
|
|
PluginID: "minnie",
|
|
|
|
Healthy: true,
|
|
|
|
ControllerInfo: &structs.CSIControllerInfo{
|
|
|
|
SupportsAttachDetach: true,
|
|
|
|
},
|
2020-02-13 15:18:55 +00:00
|
|
|
RequiresControllerPlugin: true,
|
|
|
|
},
|
|
|
|
}
|
2020-02-21 10:32:10 +00:00
|
|
|
node.CSINodePlugins = map[string]*structs.CSIInfo{
|
2020-03-10 14:22:42 +00:00
|
|
|
"minnie": {
|
|
|
|
PluginID: "minnie",
|
|
|
|
Healthy: true,
|
|
|
|
NodeInfo: &structs.CSINodeInfo{},
|
2020-02-21 10:32:10 +00:00
|
|
|
},
|
|
|
|
}
|
2020-02-13 15:18:55 +00:00
|
|
|
err := state.UpsertNode(1002, node)
|
|
|
|
require.NoError(t, err)
|
|
|
|
vols := []*structs.CSIVolume{{
|
|
|
|
ID: id0,
|
2020-03-17 15:35:34 +00:00
|
|
|
Namespace: ns,
|
2020-02-13 15:18:55 +00:00
|
|
|
PluginID: "minnie",
|
|
|
|
ControllerRequired: true,
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
}}
|
|
|
|
err = state.CSIVolumeRegister(1003, vols)
|
|
|
|
|
2020-02-17 12:50:37 +00:00
|
|
|
alloc := mock.BatchAlloc()
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
summary := mock.JobSummary(alloc.JobID)
|
|
|
|
require.NoError(t, state.UpsertJobSummary(1004, summary))
|
|
|
|
require.NoError(t, state.UpsertAllocs(1005, []*structs.Allocation{alloc}))
|
|
|
|
|
2020-02-13 15:18:55 +00:00
|
|
|
// Make the volume claim
|
|
|
|
claimReq := &structs.CSIVolumeClaimRequest{
|
2020-02-17 12:50:37 +00:00
|
|
|
VolumeID: id0,
|
|
|
|
AllocationID: alloc.ID,
|
|
|
|
Claim: structs.CSIVolumeClaimWrite,
|
2020-02-13 15:18:55 +00:00
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Region: "global",
|
|
|
|
Namespace: ns,
|
|
|
|
AuthToken: accessToken.SecretID,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
claimResp := &structs.CSIVolumeClaimResponse{}
|
|
|
|
err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim", claimReq, claimResp)
|
2020-02-21 10:32:10 +00:00
|
|
|
// Because the node is not registered
|
2020-03-16 19:59:42 +00:00
|
|
|
require.EqualError(t, err, "controller publish: attach volume: No path to node")
|
2020-02-13 15:18:55 +00:00
|
|
|
}
|
|
|
|
|
2019-10-16 20:53:39 +00:00
|
|
|
// TestCSIVolumeEndpoint_List verifies CSIVolume.List under ACLs: an empty
// list before any volumes exist, a full namespace listing after registering
// two volumes, and filtering the listing by PluginID.
func TestCSIVolumeEndpoint_List(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	state := srv.fsm.State()
	// NOTE(review): BootstrapACLTokens' error return is discarded —
	// presumably safe on a fresh test server; confirm.
	state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
	srv.config.ACLEnabled = true
	codec := rpcClient(t, srv)

	// Token with csi-read-volume in the default namespace plus plugin read.
	nsPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIReadVolume}) +
		mock.PluginPolicy("read")
	nsTok := mock.CreatePolicyAndToken(t, state, 1000, "csi-token-name", nsPolicy)

	// Empty list results
	req := &structs.CSIVolumeListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			AuthToken: nsTok.SecretID,
			Namespace: structs.DefaultNamespace,
		},
	}
	var resp structs.CSIVolumeListResponse
	err := msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp)
	require.NoError(t, err)
	// Volumes should be a non-nil empty slice, not nil.
	require.NotNil(t, resp.Volumes)
	require.Equal(t, 0, len(resp.Volumes))

	// Create the volume
	id0 := uuid.Generate()
	id1 := uuid.Generate()
	vols := []*structs.CSIVolume{{
		ID:             id0,
		Namespace:      structs.DefaultNamespace,
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeReader,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
		PluginID:       "minnie",
	}, {
		ID:             id1,
		Namespace:      structs.DefaultNamespace,
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
		PluginID:       "adam",
	}}
	err = state.CSIVolumeRegister(1002, vols)
	require.NoError(t, err)

	// Query everything in the namespace
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp)
	require.NoError(t, err)

	// Index reflects the raft index (1002) used in CSIVolumeRegister above.
	require.Equal(t, uint64(1002), resp.Index)
	require.Equal(t, 2, len(resp.Volumes))
	// Both registered IDs must appear exactly once, in any order.
	ids := map[string]bool{vols[0].ID: true, vols[1].ID: true}
	for _, v := range resp.Volumes {
		delete(ids, v.ID)
	}
	require.Equal(t, 0, len(ids))

	// Query by PluginID in ns
	req = &structs.CSIVolumeListRequest{
		PluginID: "adam",
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			Namespace: structs.DefaultNamespace,
			AuthToken: nsTok.SecretID,
		},
	}
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.List", req, &resp)
	require.NoError(t, err)
	require.Equal(t, 1, len(resp.Volumes))
	require.Equal(t, vols[1].ID, resp.Volumes[0].ID)
}
|
2020-01-28 15:28:34 +00:00
|
|
|
|
2020-02-21 19:48:16 +00:00
|
|
|
// TestCSIPluginEndpoint_RegisterViaFingerprint verifies CSIPlugin.Get and
// CSIPlugin.List under ACLs for a plugin created via node fingerprinting:
// Get requires plugin-read while List accepts plugin-list, and the plugin
// disappears once its backing nodes are deleted.
func TestCSIPluginEndpoint_RegisterViaFingerprint(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	// Create plugin "foo" via the state package helper; deleteNodes removes
	// the backing nodes (and therefore the plugin) when called.
	// Note: the local variable `state` declared below shadows the imported
	// `state` package from that point on.
	deleteNodes := state.CreateTestCSIPlugin(srv.fsm.State(), "foo")
	defer deleteNodes()

	state := srv.fsm.State()
	// NOTE(review): BootstrapACLTokens' error return is discarded —
	// presumably safe on a fresh test server; confirm.
	state.BootstrapACLTokens(1, 0, mock.ACLManagementToken())
	srv.config.ACLEnabled = true
	codec := rpcClient(t, srv)

	// Get the plugin back out
	listJob := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob})
	policy := mock.PluginPolicy("read") + listJob
	getToken := mock.CreatePolicyAndToken(t, state, 1001, "plugin-read", policy)

	req2 := &structs.CSIPluginGetRequest{
		ID: "foo",
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			AuthToken: getToken.SecretID,
		},
	}
	resp2 := &structs.CSIPluginGetResponse{}
	err := msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2)
	require.NoError(t, err)

	// Get requires plugin-read, not plugin-list
	lPolicy := mock.PluginPolicy("list")
	lTok := mock.CreatePolicyAndToken(t, state, 1003, "plugin-list", lPolicy)
	req2.AuthToken = lTok.SecretID
	err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2)
	require.Error(t, err, "Permission denied")

	// List plugins
	req3 := &structs.CSIPluginListRequest{
		QueryOptions: structs.QueryOptions{
			Region:    "global",
			AuthToken: getToken.SecretID,
		},
	}
	resp3 := &structs.CSIPluginListResponse{}
	err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3)
	require.NoError(t, err)
	require.Equal(t, 1, len(resp3.Plugins))

	// ensure that plugin->alloc denormalization does COW correctly
	// (a second List must return the same single plugin).
	err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3)
	require.NoError(t, err)
	require.Equal(t, 1, len(resp3.Plugins))

	// List allows plugin-list
	req3.AuthToken = lTok.SecretID
	err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.List", req3, resp3)
	require.NoError(t, err)
	require.Equal(t, 1, len(resp3.Plugins))

	// Deregistration works
	deleteNodes()

	// Plugin is missing: Get succeeds but returns a nil Plugin.
	req2.AuthToken = getToken.SecretID
	err = msgpackrpc.CallWithCodec(codec, "CSIPlugin.Get", req2, resp2)
	require.NoError(t, err)
	require.Nil(t, resp2.Plugin)
}
|
2020-02-13 15:18:55 +00:00
|
|
|
|
|
|
|
func TestCSI_RPCVolumeAndPluginLookup(t *testing.T) {
|
|
|
|
srv, shutdown := TestServer(t, func(c *Config) {})
|
|
|
|
defer shutdown()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
state := srv.fsm.State()
|
|
|
|
id0 := uuid.Generate()
|
|
|
|
id1 := uuid.Generate()
|
|
|
|
id2 := uuid.Generate()
|
|
|
|
|
|
|
|
// Create a client node with a plugin
|
|
|
|
node := mock.Node()
|
|
|
|
node.CSINodePlugins = map[string]*structs.CSIInfo{
|
2020-03-10 14:22:42 +00:00
|
|
|
"minnie": {PluginID: "minnie", Healthy: true, RequiresControllerPlugin: true,
|
|
|
|
ControllerInfo: &structs.CSIControllerInfo{SupportsAttachDetach: true},
|
|
|
|
},
|
|
|
|
"adam": {PluginID: "adam", Healthy: true},
|
2020-02-13 15:18:55 +00:00
|
|
|
}
|
|
|
|
err := state.UpsertNode(3, node)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Create 2 volumes
|
|
|
|
vols := []*structs.CSIVolume{
|
|
|
|
{
|
|
|
|
ID: id0,
|
2020-04-02 14:13:41 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2020-02-13 15:18:55 +00:00
|
|
|
PluginID: "minnie",
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
ControllerRequired: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
ID: id1,
|
2020-04-02 14:13:41 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2020-02-13 15:18:55 +00:00
|
|
|
PluginID: "adam",
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
ControllerRequired: false,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
err = state.CSIVolumeRegister(1002, vols)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// has controller
|
2020-04-13 14:46:43 +00:00
|
|
|
c := srv.staticEndpoints.CSIVolume
|
|
|
|
plugin, vol, err := c.volAndPluginLookup(structs.DefaultNamespace, id0)
|
2020-02-13 15:18:55 +00:00
|
|
|
require.NotNil(t, plugin)
|
|
|
|
require.NotNil(t, vol)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// no controller
|
2020-04-13 14:46:43 +00:00
|
|
|
plugin, vol, err = c.volAndPluginLookup(structs.DefaultNamespace, id1)
|
2020-02-13 15:18:55 +00:00
|
|
|
require.Nil(t, plugin)
|
2020-02-17 12:50:37 +00:00
|
|
|
require.NotNil(t, vol)
|
2020-02-13 15:18:55 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// doesn't exist
|
2020-04-13 14:46:43 +00:00
|
|
|
plugin, vol, err = c.volAndPluginLookup(structs.DefaultNamespace, id2)
|
2020-02-13 15:18:55 +00:00
|
|
|
require.Nil(t, plugin)
|
|
|
|
require.Nil(t, vol)
|
|
|
|
require.EqualError(t, err, fmt.Sprintf("volume not found: %s", id2))
|
|
|
|
}
|