package nomad

import (
"strconv"
"strings"
"testing"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const jobIndex = 1000
func registerAndVerifyJob(s *Server, t *testing.T, prefix string, counter int) *structs.Job {
job := mock.Job()
job.ID = prefix + strconv.Itoa(counter)
state := s.fsm.State()
if err := state.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job); err != nil {
t.Fatalf("err: %v", err)
}
return job
}
func TestSearch_PrefixSearch_Job(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
job := registerAndVerifyJob(s, t, prefix, 0)
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(uint64(jobIndex), resp.Index)
}
func TestSearch_PrefixSearch_ACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
jobID := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, root, cleanupS := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
state := s.fsm.State()
job := registerAndVerifyJob(s, t, jobID, 0)
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node()))
req := &structs.SearchRequest{
Prefix: "",
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Try without a token and expect failure
{
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with an invalid token and expect failure
{
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.AuthToken = invalidToken.SecretID
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a node:read token and expect failure due to Jobs being the context
{
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-invalid2", mock.NodePolicy(acl.PolicyRead))
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a node:read token and expect success due to All context
{
validToken := mock.CreatePolicyAndToken(t, state, 1007, "test-valid", mock.NodePolicy(acl.PolicyRead))
req.Context = structs.All
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(uint64(1001), resp.Index)
assert.Len(resp.Matches[structs.Nodes], 1)
// Jobs filtered out since token only has access to node:read
assert.Len(resp.Matches[structs.Jobs], 0)
}
// Try with a valid token for namespace:read-job
{
validToken := mock.CreatePolicyAndToken(t, state, 1009, "test-valid2",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
// Index of job - not node - because node context is filtered out
assert.Equal(uint64(1000), resp.Index)
// Nodes filtered out since token only has access to namespace:read-job
assert.Len(resp.Matches[structs.Nodes], 0)
}
// Try with a valid token for node:read and namespace:read-job
{
validToken := mock.CreatePolicyAndToken(t, state, 1011, "test-valid3", strings.Join([]string{
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}),
mock.NodePolicy(acl.PolicyRead),
}, "\n"))
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Len(resp.Matches[structs.Nodes], 1)
assert.Equal(uint64(1001), resp.Index)
}
// Try with a management token
{
req.AuthToken = root.SecretID
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(uint64(1001), resp.Index)
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Len(resp.Matches[structs.Nodes], 1)
}
}
func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "example-test-------" // Assert that a job with more than 4 hyphens works
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
// Register a job and an allocation
job := registerAndVerifyJob(s, t, prefix, 0)
alloc := mock.Alloc()
alloc.JobID = job.ID
alloc.Namespace = job.Namespace
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
if err := state.UpsertJobSummary(999, summary); err != nil {
t.Fatalf("err: %v", err)
}
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
req := &structs.SearchRequest{
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// e.g. when i == 10, req.Prefix == "example-te"
for i := 1; i < len(prefix); i++ {
req.Prefix = prefix[:i]
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.EqualValues(jobIndex, resp.Index)
}
}
func TestSearch_PrefixSearch_All_LongJob(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := strings.Repeat("a", 100)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
// Register a job and an allocation
job := registerAndVerifyJob(s, t, prefix, 0)
alloc := mock.Alloc()
alloc.JobID = job.ID
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
if err := state.UpsertJobSummary(999, summary); err != nil {
t.Fatalf("err: %v", err)
}
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.EqualValues(jobIndex, resp.Index)
}
// truncate should limit results to 20
func TestSearch_PrefixSearch_Truncate(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
var job *structs.Job
for counter := 0; counter < 25; counter++ {
job = registerAndVerifyJob(s, t, prefix, counter)
}
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(20, len(resp.Matches[structs.Jobs]))
assert.Equal(resp.Truncations[structs.Jobs], true)
assert.Equal(uint64(jobIndex), resp.Index)
}
func TestSearch_PrefixSearch_AllWithJob(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
job := registerAndVerifyJob(s, t, prefix, 0)
eval1 := mock.Eval()
eval1.ID = job.ID
s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(1, len(resp.Matches[structs.Evals]))
assert.Equal(eval1.ID, resp.Matches[structs.Evals][0])
}
func TestSearch_PrefixSearch_Evals(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
eval1 := mock.Eval()
s.fsm.State().UpsertEvals(structs.MsgTypeTestSetup, 2000, []*structs.Evaluation{eval1})
prefix := eval1.ID[:len(eval1.ID)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Evals,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Evals]))
assert.Equal(eval1.ID, resp.Matches[structs.Evals][0])
assert.Equal(resp.Truncations[structs.Evals], false)
assert.Equal(uint64(2000), resp.Index)
}
func TestSearch_PrefixSearch_Allocation(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
alloc := mock.Alloc()
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
if err := state.UpsertJobSummary(999, summary); err != nil {
t.Fatalf("err: %v", err)
}
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 90, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
prefix := alloc.ID[:len(alloc.ID)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Allocs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Allocs]))
assert.Equal(alloc.ID, resp.Matches[structs.Allocs][0])
assert.Equal(resp.Truncations[structs.Allocs], false)
assert.Equal(uint64(90), resp.Index)
}
func TestSearch_PrefixSearch_All_UUID(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
alloc := mock.Alloc()
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
if err := state.UpsertJobSummary(999, summary); err != nil {
t.Fatalf("err: %v", err)
}
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
node := mock.Node()
if err := state.UpsertNode(structs.MsgTypeTestSetup, 1001, node); err != nil {
t.Fatalf("err: %v", err)
}
eval1 := mock.Eval()
eval1.ID = node.ID
if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1002, []*structs.Evaluation{eval1}); err != nil {
t.Fatalf("err: %v", err)
}
req := &structs.SearchRequest{
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
for i := 1; i < len(alloc.ID); i++ {
req.Prefix = alloc.ID[:i]
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(1, len(resp.Matches[structs.Allocs]))
assert.Equal(alloc.ID, resp.Matches[structs.Allocs][0])
assert.Equal(resp.Truncations[structs.Allocs], false)
assert.EqualValues(1002, resp.Index)
}
}
func TestSearch_PrefixSearch_Node(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
state := s.fsm.State()
node := mock.Node()
if err := state.UpsertNode(structs.MsgTypeTestSetup, 100, node); err != nil {
t.Fatalf("err: %v", err)
}
prefix := node.ID[:len(node.ID)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Nodes,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Nodes]))
assert.Equal(node.ID, resp.Matches[structs.Nodes][0])
assert.Equal(false, resp.Truncations[structs.Nodes])
assert.Equal(uint64(100), resp.Index)
}
func TestSearch_PrefixSearch_Deployment(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
deployment := mock.Deployment()
s.fsm.State().UpsertDeployment(2000, deployment)
prefix := deployment.ID[:len(deployment.ID)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Deployments,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: deployment.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Deployments]))
assert.Equal(deployment.ID, resp.Matches[structs.Deployments][0])
assert.Equal(resp.Truncations[structs.Deployments], false)
assert.Equal(uint64(2000), resp.Index)
}
func TestSearch_PrefixSearch_AllContext(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
state := s.fsm.State()
node := mock.Node()
if err := state.UpsertNode(structs.MsgTypeTestSetup, 100, node); err != nil {
t.Fatalf("err: %v", err)
}
eval1 := mock.Eval()
eval1.ID = node.ID
if err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1}); err != nil {
t.Fatalf("err: %v", err)
}
prefix := node.ID[:len(node.ID)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Nodes]))
assert.Equal(1, len(resp.Matches[structs.Evals]))
assert.Equal(node.ID, resp.Matches[structs.Nodes][0])
assert.Equal(eval1.ID, resp.Matches[structs.Evals][0])
assert.Equal(uint64(1000), resp.Index)
}
// Tests that the top 20 matches are returned when no prefix is set
func TestSearch_PrefixSearch_NoPrefix(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
job := registerAndVerifyJob(s, t, prefix, 0)
req := &structs.SearchRequest{
Prefix: "",
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(uint64(jobIndex), resp.Index)
}
// Tests that zero matches are returned when a prefix has no matching
// results
func TestSearch_PrefixSearch_NoMatches(t *testing.T) {
t.Parallel()
assert := assert.New(t)
prefix := "aaaaaaaa-e8f7-fd38-c855-ab94ceb8970"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(0, len(resp.Matches[structs.Jobs]))
assert.Equal(uint64(0), resp.Index)
}
// Prefixes can only be looked up if their length is even. For prefixes
// with an odd length, the search uses the first length-1 characters.
func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) {
t.Parallel()
assert := assert.New(t)
id1 := "aaafaaaa-e8f7-fd38-c855-ab94ceb89"
id2 := "aaafeaaa-e8f7-fd38-c855-ab94ceb89"
prefix := "aaafa"
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
job := registerAndVerifyJob(s, t, id1, 0)
registerAndVerifyJob(s, t, id2, 50)
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
}
func TestSearch_PrefixSearch_MultiRegion(t *testing.T) {
t.Parallel()
assert := assert.New(t)
jobName := "exampleexample"
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
c.Region = "foo"
})
defer cleanupS1()
s2, cleanupS2 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
c.Region = "bar"
})
defer cleanupS2()
TestJoin(t, s1, s2)
testutil.WaitForLeader(t, s1.RPC)
job := registerAndVerifyJob(s1, t, jobName, 0)
req := &structs.SearchRequest{
Prefix: "",
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "foo",
Namespace: job.Namespace,
},
}
codec := rpcClient(t, s2)
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(uint64(jobIndex), resp.Index)
}
func TestSearch_PrefixSearch_CSIPlugin(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
id := uuid.Generate()
state.CreateTestCSIPlugin(s.fsm.State(), id)
prefix := id[:len(id)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Plugins,
QueryOptions: structs.QueryOptions{
Region: "global",
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Plugins]))
assert.Equal(id, resp.Matches[structs.Plugins][0])
assert.Equal(resp.Truncations[structs.Plugins], false)
}
func TestSearch_PrefixSearch_CSIVolume(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
id := uuid.Generate()
err := s.fsm.State().CSIVolumeRegister(1000, []*structs.CSIVolume{{
ID: id,
Namespace: structs.DefaultNamespace,
PluginID: "glade",
}})
require.NoError(t, err)
prefix := id[:len(id)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Volumes,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Volumes]))
assert.Equal(id, resp.Matches[structs.Volumes][0])
assert.Equal(resp.Truncations[structs.Volumes], false)
}
func TestSearch_PrefixSearch_Namespace(t *testing.T) {
assert := assert.New(t)
t.Parallel()
s, cleanup := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanup()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
ns := mock.Namespace()
assert.Nil(s.fsm.State().UpsertNamespaces(2000, []*structs.Namespace{ns}))
prefix := ns.Name[:len(ns.Name)-2]
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Namespaces,
QueryOptions: structs.QueryOptions{
Region: "global",
},
}
var resp structs.SearchResponse
if err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
assert.Equal(1, len(resp.Matches[structs.Namespaces]))
assert.Equal(ns.Name, resp.Matches[structs.Namespaces][0])
assert.Equal(resp.Truncations[structs.Namespaces], false)
assert.Equal(uint64(2000), resp.Index)
}
func TestSearch_PrefixSearch_Namespace_ACL(t *testing.T) {
t.Parallel()
assert := assert.New(t)
s, root, cleanup := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanup()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
state := s.fsm.State()
ns := mock.Namespace()
assert.Nil(state.UpsertNamespaces(500, []*structs.Namespace{ns}))
job1 := mock.Job()
assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 502, job1))
job2 := mock.Job()
job2.Namespace = ns.Name
assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 504, job2))
assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1001, mock.Node()))
req := &structs.SearchRequest{
Prefix: "",
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job1.Namespace,
},
}
// Try without a token and expect failure
{
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with an invalid token and expect failure
{
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.AuthToken = invalidToken.SecretID
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a node:read token and expect failure due to Namespaces being the context
{
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-invalid2", mock.NodePolicy(acl.PolicyRead))
req.Context = structs.Namespaces
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
err := msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp)
assert.NotNil(err)
assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
}
// Try with a node:read token and expect success due to All context
{
validToken := mock.CreatePolicyAndToken(t, state, 1007, "test-valid", mock.NodePolicy(acl.PolicyRead))
req.Context = structs.All
req.AuthToken = validToken.SecretID
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(uint64(1001), resp.Index)
assert.Len(resp.Matches[structs.Nodes], 1)
// Jobs filtered out since token only has access to node:read
assert.Len(resp.Matches[structs.Jobs], 0)
}
// Try with a valid token for non-default namespace:read-job
{
validToken := mock.CreatePolicyAndToken(t, state, 1009, "test-valid2",
mock.NamespacePolicy(job2.Namespace, "", []string{acl.NamespaceCapabilityReadJob}))
req.Context = structs.All
req.AuthToken = validToken.SecretID
req.Namespace = job2.Namespace
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job2.ID, resp.Matches[structs.Jobs][0])
assert.Len(resp.Matches[structs.Namespaces], 1)
// Index of job - not node - because node context is filtered out
assert.Equal(uint64(504), resp.Index)
// Nodes filtered out since token only has access to namespace:read-job
assert.Len(resp.Matches[structs.Nodes], 0)
}
// Try with a valid token for node:read and default namespace:read-job
{
validToken := mock.CreatePolicyAndToken(t, state, 1011, "test-valid3", strings.Join([]string{
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}),
mock.NodePolicy(acl.PolicyRead),
}, "\n"))
req.Context = structs.All
req.AuthToken = validToken.SecretID
req.Namespace = structs.DefaultNamespace
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job1.ID, resp.Matches[structs.Jobs][0])
assert.Len(resp.Matches[structs.Nodes], 1)
assert.Equal(uint64(1001), resp.Index)
assert.Len(resp.Matches[structs.Namespaces], 1)
}
// Try with a management token
{
req.Context = structs.All
req.AuthToken = root.SecretID
req.Namespace = structs.DefaultNamespace
var resp structs.SearchResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
assert.Equal(uint64(1001), resp.Index)
assert.Len(resp.Matches[structs.Jobs], 1)
assert.Equal(job1.ID, resp.Matches[structs.Jobs][0])
assert.Len(resp.Matches[structs.Nodes], 1)
assert.Len(resp.Matches[structs.Namespaces], 2)
}
}
func TestSearch_PrefixSearch_ScalingPolicy(t *testing.T) {
t.Parallel()
require := require.New(t)
s, cleanupS := TestServer(t, func(c *Config) {
c.NumSchedulers = 0
})
defer cleanupS()
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
job, policy := mock.JobWithScalingPolicy()
prefix := policy.ID
state := s.fsm.State()
require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job))
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.ScalingPolicies,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
require.Equal(1, len(resp.Matches[structs.ScalingPolicies]))
require.Equal(policy.ID, resp.Matches[structs.ScalingPolicies][0])
require.Equal(uint64(jobIndex), resp.Index)
req.Context = structs.All
require.NoError(msgpackrpc.CallWithCodec(codec, "Search.PrefixSearch", req, &resp))
require.Equal(1, len(resp.Matches[structs.ScalingPolicies]))
require.Equal(policy.ID, resp.Matches[structs.ScalingPolicies][0])
require.Equal(uint64(jobIndex), resp.Index)
}