open-nomad/nomad/job_endpoint_test.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package nomad
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
"time"
"github.com/hashicorp/go-memdb"
msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/hashicorp/raft"
"github.com/kr/pretty"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/slices"
)
func TestJobEndpoint_Register(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
expectedServiceName := "web-frontend"
if serviceName != expectedServiceName {
t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
}
// Lookup the evaluation
eval, err := state.EvalByID(ws, resp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != resp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
if eval.Priority != job.Priority {
t.Fatalf("bad: %#v", eval)
}
if eval.Type != job.Type {
t.Fatalf("bad: %#v", eval)
}
if eval.TriggeredBy != structs.EvalTriggerJobRegister {
t.Fatalf("bad: %#v", eval)
}
if eval.JobID != job.ID {
t.Fatalf("bad: %#v", eval)
}
if eval.JobModifyIndex != resp.JobModifyIndex {
t.Fatalf("bad: %#v", eval)
}
if eval.Status != structs.EvalStatusPending {
t.Fatalf("bad: %#v", eval)
}
if eval.CreateTime == 0 {
t.Fatalf("eval CreateTime is unset: %#v", eval)
}
if eval.ModifyTime == 0 {
t.Fatalf("eval ModifyTime is unset: %#v", eval)
}
}
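// exampleRegisterRequest is a minimal sketch, not part of the original file, of
// the request shape the register tests in this file build inline; the helper
// name is illustrative only and it is not used by the tests.
func exampleRegisterRequest(job *structs.Job) *structs.JobRegisterRequest {
	return &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
}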
// TestJobEndpoint_Register_NonOverlapping asserts that ClientStatus must be
// terminal, not just DesiredStatus, for the resources used by a job to be
// considered free for subsequent placements to use.
//
// See: https://github.com/hashicorp/nomad/issues/10440
func TestJobEndpoint_Register_NonOverlapping(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
})
defer cleanupS1()
state := s1.fsm.State()
// Create a mock node with easy to check resources
node := mock.Node()
node.Resources = nil // Deprecated in 0.9
node.NodeResources.Cpu.CpuShares = 700
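// 700 MHz of CPU fits only a single copy of the mock job's task group, so a
// second placement must block until the first allocation is client-terminal.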
must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node))
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Count = 1
req := &structs.JobRegisterRequest{
Job: job.Copy(),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
must.NonZero(t, resp.Index)
// Assert placement
jobReq := &structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var alloc *structs.AllocListStub
testutil.Wait(t, func() (bool, error) {
resp := structs.JobAllocationsResponse{}
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Allocations", jobReq, &resp))
if n := len(resp.Allocations); n != 1 {
return false, fmt.Errorf("expected 1 allocation but found %d:\n%v", n, resp.Allocations)
}
alloc = resp.Allocations[0]
return true, nil
})
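// The single allocation should land on our node and remain pending on the
// client until a client update arrives.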
must.Eq(t, alloc.NodeID, node.ID)
must.Eq(t, alloc.DesiredStatus, structs.AllocDesiredStatusRun)
must.Eq(t, alloc.ClientStatus, structs.AllocClientStatusPending)
// Stop
stopReq := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: false,
WriteRequest: req.WriteRequest,
}
var stopResp structs.JobDeregisterResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Deregister", stopReq, &stopResp))
// Wait until the Stop is complete
testutil.Wait(t, func() (bool, error) {
eval, err := state.EvalByID(nil, stopResp.EvalID)
must.NoError(t, err)
if eval == nil {
return false, fmt.Errorf("eval not applied: %s", resp.EvalID)
}
return eval.Status == structs.EvalStatusComplete, fmt.Errorf("expected eval to be complete but found: %s", eval.Status)
})
// Assert new register blocked
req.Job = job.Copy()
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
must.NonZero(t, resp.Index)
blockedEval := ""
testutil.Wait(t, func() (bool, error) {
// Assert no new allocs
allocResp := structs.JobAllocationsResponse{}
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Allocations", jobReq, &allocResp))
if n := len(allocResp.Allocations); n != 1 {
return false, fmt.Errorf("expected 1 allocation but found %d:\n%v", n, allocResp.Allocations)
}
if alloc.ID != allocResp.Allocations[0].ID {
return false, fmt.Errorf("unexpected change in alloc: %#v", *allocResp.Allocations[0])
}
eval, err := state.EvalByID(nil, resp.EvalID)
must.NoError(t, err)
if eval == nil {
return false, fmt.Errorf("eval not applied: %s", resp.EvalID)
}
if eval.Status != structs.EvalStatusComplete {
return false, fmt.Errorf("expected eval to be complete but found: %s", eval.Status)
}
if eval.BlockedEval == "" {
return false, fmt.Errorf("expected a blocked eval to be created")
}
blockedEval = eval.BlockedEval
return true, nil
})
// Set ClientStatus=complete like a client would
stoppedAlloc := &structs.Allocation{
ID: alloc.ID,
NodeID: alloc.NodeID,
TaskStates: map[string]*structs.TaskState{
"web": &structs.TaskState{
State: structs.TaskStateDead,
},
},
ClientStatus: structs.AllocClientStatusComplete,
DeploymentStatus: nil, // should not have an impact
NetworkStatus: nil, // should not have an impact
}
upReq := &structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{stoppedAlloc},
WriteRequest: req.WriteRequest,
}
var upResp structs.GenericResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", upReq, &upResp))
// Assert newer register's eval unblocked
testutil.Wait(t, func() (bool, error) {
eval, err := state.EvalByID(nil, blockedEval)
must.NoError(t, err)
must.NotNil(t, eval)
if eval.Status != structs.EvalStatusComplete {
return false, fmt.Errorf("expected blocked eval to be complete but found: %s", eval.Status)
}
return true, nil
})
// Assert new alloc placed
testutil.Wait(t, func() (bool, error) {
// Assert no new allocs
allocResp := structs.JobAllocationsResponse{}
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Allocations", jobReq, &allocResp))
if n := len(allocResp.Allocations); n != 2 {
return false, fmt.Errorf("expected 2 allocs but found %d:\n%v", n, allocResp.Allocations)
}
slices.SortFunc(allocResp.Allocations, func(a, b *structs.AllocListStub) bool {
return a.CreateIndex < b.CreateIndex
})
if alloc.ID != allocResp.Allocations[0].ID {
return false, fmt.Errorf("unexpected change in alloc: %#v", *allocResp.Allocations[0])
}
if cs := allocResp.Allocations[0].ClientStatus; cs != structs.AllocClientStatusComplete {
return false, fmt.Errorf("expected old alloc to be complete but found: %s", cs)
}
if cs := allocResp.Allocations[1].ClientStatus; cs != structs.AllocClientStatusPending {
return false, fmt.Errorf("expected new alloc to be pending but found: %s", cs)
}
return true, nil
})
}
func TestJobEndpoint_Register_PreserveCounts(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Name = "group1"
job.TaskGroups[0].Count = 10
job.Canonicalize()
// Register the job
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}, &structs.JobRegisterResponse{}))
// Check the job in the FSM state
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(10, out.TaskGroups[0].Count)
// New version:
// new "group2" with 2 instances
// "group1" goes from 10 -> 0 in the spec
job = job.Copy()
job.TaskGroups[0].Count = 0 // 10 -> 0 in the job spec
job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy())
job.TaskGroups[1].Name = "group2"
job.TaskGroups[1].Count = 2
// Perform the update
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
Job: job,
PreserveCounts: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}, &structs.JobRegisterResponse{}))
// Check the job in the FSM state
out, err = state.JobByID(nil, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(10, out.TaskGroups[0].Count) // should not change
require.Equal(2, out.TaskGroups[1].Count) // should be as in job spec
}
func TestJobEndpoint_Register_EvalPriority(t *testing.T) {
ci.Parallel(t)
requireAssert := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 })
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Name = "group1"
job.Canonicalize()
// Register the job.
requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
EvalPriority: 99,
}, &structs.JobRegisterResponse{}))
// Grab the eval from the state, and check its priority is as expected.
state := s1.fsm.State()
out, err := state.EvalsByJob(nil, job.Namespace, job.ID)
requireAssert.NoError(err)
requireAssert.Len(out, 1)
requireAssert.Equal(99, out[0].Priority)
}
func TestJobEndpoint_Register_Connect(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Tasks[0].Services = nil
job.TaskGroups[0].Networks = structs.Networks{{
Mode: "bridge",
}}
job.TaskGroups[0].Services = []*structs.Service{{
Name: "backend",
PortLabel: "8080",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
}},
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
require.NotZero(resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check that the sidecar task was injected
require.Len(out.TaskGroups[0].Tasks, 2)
sidecarTask := out.TaskGroups[0].Tasks[1]
require.Equal("connect-proxy-backend", sidecarTask.Name)
require.Equal("connect-proxy:backend", string(sidecarTask.Kind))
require.Equal("connect-proxy-backend", out.TaskGroups[0].Networks[0].DynamicPorts[0].Label)
// Check that round tripping the job doesn't change the sidecarTask
out.Meta["test"] = "abc"
req.Job = out
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
require.NotZero(resp.Index)
// Check for the updated job in the FSM
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(resp.JobModifyIndex, out.CreateIndex)
require.Len(out.TaskGroups[0].Tasks, 2)
require.Exactly(sidecarTask, out.TaskGroups[0].Tasks[1])
}
func TestJobEndpoint_Register_ConnectIngressGateway_minimum(t *testing.T) {
ci.Parallel(t)
r := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// job contains the minimal possible gateway service definition
job := mock.ConnectIngressGatewayJob("host", false)
// Create the register request
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check that the gateway task got injected
r.Len(out.TaskGroups[0].Tasks, 1)
task := out.TaskGroups[0].Tasks[0]
r.Equal("connect-ingress-my-ingress-service", task.Name)
r.Equal("connect-ingress:my-ingress-service", string(task.Kind))
r.Equal("docker", task.Driver)
r.NotNil(task.Config)
// Check the CE fields got set
service := out.TaskGroups[0].Services[0]
r.Equal(&structs.ConsulIngressConfigEntry{
TLS: nil,
Listeners: []*structs.ConsulIngressListener{{
Port: 2000,
Protocol: "tcp",
Services: []*structs.ConsulIngressService{{
Name: "service1",
}},
}},
}, service.Connect.Gateway.Ingress)
// Check that round-tripping does not inject a duplicate task
out.Meta["test"] = "abc"
req.Job = out
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the updated job in the FSM
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check we did not re-add the task that was added the first time
r.Len(out.TaskGroups[0].Tasks, 1)
}
func TestJobEndpoint_Register_ConnectIngressGateway_full(t *testing.T) {
ci.Parallel(t)
r := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// reconfigure job to fill in all the possible fields
job := mock.ConnectIngressGatewayJob("bridge", false)
job.TaskGroups[0].Services[0].Connect = &structs.ConsulConnect{
Gateway: &structs.ConsulGateway{
Proxy: &structs.ConsulGatewayProxy{
ConnectTimeout: pointer.Of(1 * time.Second),
EnvoyGatewayBindTaggedAddresses: true,
EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{
"service1": {
Address: "10.0.0.1",
Port: 2001,
},
"service2": {
Address: "10.0.0.2",
Port: 2002,
},
},
EnvoyGatewayNoDefaultBind: true,
Config: map[string]interface{}{
"foo": 1,
"bar": "baz",
},
},
Ingress: &structs.ConsulIngressConfigEntry{
TLS: &structs.ConsulGatewayTLSConfig{
Enabled: true,
},
Listeners: []*structs.ConsulIngressListener{{
Port: 3000,
Protocol: "tcp",
Services: []*structs.ConsulIngressService{{
Name: "db",
}},
}, {
Port: 3001,
Protocol: "http",
Services: []*structs.ConsulIngressService{{
Name: "website",
Hosts: []string{"10.0.1.0", "10.0.1.0:3001"},
}},
}},
},
},
}
// Create the register request
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check that the gateway task got injected
r.Len(out.TaskGroups[0].Tasks, 1)
task := out.TaskGroups[0].Tasks[0]
r.Equal("connect-ingress-my-ingress-service", task.Name)
r.Equal("connect-ingress:my-ingress-service", string(task.Kind))
r.Equal("docker", task.Driver)
r.NotNil(task.Config)
// Check that the ingress service is all set
service := out.TaskGroups[0].Services[0]
r.Equal("my-ingress-service", service.Name)
r.Equal(&structs.ConsulIngressConfigEntry{
TLS: &structs.ConsulGatewayTLSConfig{
Enabled: true,
},
Listeners: []*structs.ConsulIngressListener{{
Port: 3000,
Protocol: "tcp",
Services: []*structs.ConsulIngressService{{
Name: "db",
}},
}, {
Port: 3001,
Protocol: "http",
Services: []*structs.ConsulIngressService{{
Name: "website",
Hosts: []string{"10.0.1.0", "10.0.1.0:3001"},
}},
}},
}, service.Connect.Gateway.Ingress)
// Check that round-tripping does not inject a duplicate task
out.Meta["test"] = "abc"
req.Job = out
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the updated job in the FSM
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check we did not re-add the task that was added the first time
r.Len(out.TaskGroups[0].Tasks, 1)
}
func TestJobEndpoint_Register_ConnectExposeCheck(t *testing.T) {
ci.Parallel(t)
r := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Setup the job we are going to register
job := mock.Job()
job.TaskGroups[0].Tasks[0].Services = nil
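// Two dynamic bridge ports: hcPort backs the exposed HTTP check and v2Port
// backs the exposed gRPC check defined below.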
job.TaskGroups[0].Networks = structs.Networks{{
Mode: "bridge",
DynamicPorts: []structs.Port{{
Label: "hcPort",
To: -1,
}, {
Label: "v2Port",
To: -1,
}},
}}
job.TaskGroups[0].Services = []*structs.Service{{
Name: "backend",
PortLabel: "8080",
Checks: []*structs.ServiceCheck{{
Name: "check1",
Type: "http",
Protocol: "http",
Path: "/health",
Expose: true,
PortLabel: "hcPort",
Interval: 1 * time.Second,
Timeout: 1 * time.Second,
}, {
Name: "check2",
Type: "script",
Command: "/bin/true",
TaskName: "web",
Interval: 1 * time.Second,
Timeout: 1 * time.Second,
}, {
Name: "check3",
Type: "grpc",
Protocol: "grpc",
Path: "/v2/health",
Expose: true,
PortLabel: "v2Port",
Interval: 1 * time.Second,
Timeout: 1 * time.Second,
}},
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{}},
}}
// Create the register request
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check that the new expose paths got created
r.Len(out.TaskGroups[0].Services[0].Connect.SidecarService.Proxy.Expose.Paths, 2)
httpPath := out.TaskGroups[0].Services[0].Connect.SidecarService.Proxy.Expose.Paths[0]
r.Equal(structs.ConsulExposePath{
Path: "/health",
Protocol: "http",
LocalPathPort: 8080,
ListenerPort: "hcPort",
}, httpPath)
grpcPath := out.TaskGroups[0].Services[0].Connect.SidecarService.Proxy.Expose.Paths[1]
r.Equal(structs.ConsulExposePath{
Path: "/v2/health",
Protocol: "grpc",
LocalPathPort: 8080,
ListenerPort: "v2Port",
}, grpcPath)
// make sure round tripping does not create duplicate expose paths
out.Meta["test"] = "abc"
req.Job = out
r.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
r.NotZero(resp.Index)
// Check for the updated job in the FSM
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
r.NoError(err)
r.NotNil(out)
r.Equal(resp.JobModifyIndex, out.CreateIndex)
// make sure we are not re-adding what has already been added
r.Len(out.TaskGroups[0].Services[0].Connect.SidecarService.Proxy.Expose.Paths, 2)
}
func TestJobEndpoint_Register_ConnectWithSidecarTask(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Networks = structs.Networks{
{
Mode: "bridge",
},
}
job.TaskGroups[0].Tasks[0].Services = nil
job.TaskGroups[0].Services = []*structs.Service{
{
Name: "backend",
PortLabel: "8080",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
SidecarTask: &structs.SidecarTask{
Meta: map[string]string{
"source": "test",
},
Resources: &structs.Resources{
CPU: 500,
},
Config: map[string]interface{}{
"labels": map[string]string{
"foo": "bar",
},
},
},
},
},
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
require.NotZero(resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(resp.JobModifyIndex, out.CreateIndex)
// Check that the sidecar task was injected
require.Len(out.TaskGroups[0].Tasks, 2)
sidecarTask := out.TaskGroups[0].Tasks[1]
require.Equal("connect-proxy-backend", sidecarTask.Name)
require.Equal("connect-proxy:backend", string(sidecarTask.Kind))
require.Equal("connect-proxy-backend", out.TaskGroups[0].Networks[0].DynamicPorts[0].Label)
// Check that the correct fields were overridden from the sidecar_task block
require.Equal("test", sidecarTask.Meta["source"])
require.Equal(500, sidecarTask.Resources.CPU)
require.Equal(connectSidecarResources().MemoryMB, sidecarTask.Resources.MemoryMB)
cfg := connectSidecarDriverConfig()
cfg["labels"] = map[string]interface{}{
"foo": "bar",
}
require.Equal(cfg, sidecarTask.Config)
// Check that round tripping the job doesn't change the sidecarTask
out.Meta["test"] = "abc"
req.Job = out
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
require.NotZero(resp.Index)
// Check for the updated job in the FSM
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.Equal(resp.JobModifyIndex, out.CreateIndex)
require.Len(out.TaskGroups[0].Tasks, 2)
require.Exactly(sidecarTask, out.TaskGroups[0].Tasks[1])
}
func TestJobEndpoint_Register_Connect_ValidatesWithoutSidecarTask(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.TaskGroups[0].Networks = structs.Networks{
{
Mode: "bridge",
},
}
job.TaskGroups[0].Tasks[0].Services = nil
job.TaskGroups[0].Services = []*structs.Service{
{
Name: "backend",
PortLabel: "8080",
Connect: &structs.ConsulConnect{
SidecarService: nil,
},
Checks: []*structs.ServiceCheck{{
Name: "exposed_no_sidecar",
Type: "http",
Expose: true,
Path: "/health",
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
}},
},
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.Error(t, err)
require.Contains(t, err.Error(), "exposed_no_sidecar requires use of sidecar_proxy")
}
func TestJobEndpoint_Register_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
testutil.WaitForLeader(t, s1.RPC)
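// newVolumeJob builds a job that mounts a host volume and a CSI volume, so
// registering it exercises the volume-related ACL capabilities below.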
newVolumeJob := func(readonlyVolume bool) *structs.Job {
j := mock.Job()
tg := j.TaskGroups[0]
tg.Volumes = map[string]*structs.VolumeRequest{
"ca-certs": {
Type: structs.VolumeTypeHost,
Source: "prod-ca-certs",
ReadOnly: readonlyVolume,
},
"csi": {
Type: structs.VolumeTypeCSI,
Source: "prod-db",
AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter,
},
}
tg.Tasks[0].VolumeMounts = []*structs.VolumeMount{
{
Volume: "ca-certs",
Destination: "/etc/ca-certificates",
// Task readonly does not affect ACLs
ReadOnly: true,
},
}
return j
}
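// newCSIPluginJob builds a job whose task runs a CSI node plugin, which
// requires the csi-register-plugin capability.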
newCSIPluginJob := func() *structs.Job {
j := mock.Job()
t := j.TaskGroups[0].Tasks[0]
t.CSIPluginConfig = &structs.TaskCSIPluginConfig{
ID: "foo",
Type: "node",
}
return j
}
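// Build policies and tokens of increasing scope: submit-job only, submit-job
// plus read-write or read-only host-volume and CSI mount access, and
// submit-job plus csi-register-plugin.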
submitJobPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob, acl.NamespaceCapabilitySubmitJob})
submitJobToken := mock.CreatePolicyAndToken(t, s1.State(), 1001, "test-submit-job", submitJobPolicy)
volumesPolicyReadWrite := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadWrite})
volumesPolicyCSIMount := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIMountVolume}) +
mock.PluginPolicy("read")
submitJobWithVolumesReadWriteToken := mock.CreatePolicyAndToken(t, s1.State(), 1002, "test-submit-volumes", submitJobPolicy+
volumesPolicyReadWrite+
volumesPolicyCSIMount)
volumesPolicyReadOnly := mock.HostVolumePolicy("prod-*", "", []string{acl.HostVolumeCapabilityMountReadOnly})
submitJobWithVolumesReadOnlyToken := mock.CreatePolicyAndToken(t, s1.State(), 1003, "test-submit-volumes-readonly", submitJobPolicy+
volumesPolicyReadOnly+
volumesPolicyCSIMount)
pluginPolicy := mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityCSIRegisterPlugin})
pluginToken := mock.CreatePolicyAndToken(t, s1.State(), 1005, "test-csi-register-plugin", submitJobPolicy+pluginPolicy)
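// Each case registers one of the jobs above with one of the tokens above;
// ErrExpected marks the combinations the ACL check must reject.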
cases := []struct {
Name string
Job *structs.Job
Token string
ErrExpected bool
}{
{
Name: "without a token",
Job: mock.Job(),
Token: "",
ErrExpected: true,
},
{
Name: "with a token",
Job: mock.Job(),
Token: root.SecretID,
ErrExpected: false,
},
{
Name: "with a token that can submit a job, but not use a required volume",
Job: newVolumeJob(false),
Token: submitJobToken.SecretID,
ErrExpected: true,
},
{
Name: "with a token that can submit a job, and use all required volumes",
Job: newVolumeJob(false),
Token: submitJobWithVolumesReadWriteToken.SecretID,
ErrExpected: false,
},
{
Name: "with a token that can submit a job, but only has readonly access",
Job: newVolumeJob(false),
Token: submitJobWithVolumesReadOnlyToken.SecretID,
ErrExpected: true,
},
{
Name: "with a token that can submit a job, and readonly volume access is enough",
Job: newVolumeJob(true),
Token: submitJobWithVolumesReadOnlyToken.SecretID,
ErrExpected: false,
},
{
Name: "with a token that can submit a job, plugin rejected",
Job: newCSIPluginJob(),
Token: submitJobToken.SecretID,
ErrExpected: true,
},
{
Name: "with a token that also has csi-register-plugin, accepted",
Job: newCSIPluginJob(),
Token: pluginToken.SecretID,
ErrExpected: false,
},
}
for _, tt := range cases {
t.Run(tt.Name, func(t *testing.T) {
codec := rpcClient(t, s1)
req := &structs.JobRegisterRequest{
Job: tt.Job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: tt.Job.Namespace,
},
}
req.AuthToken = tt.Token
// Register the job using the case's token and check the expectation
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
// If we expected an error, then the job should _not_ be registered.
if tt.ErrExpected {
require.Error(t, err, "expected error")
return
}
require.NoError(t, err, "unexpected error")
require.NotEqual(t, 0, resp.Index)
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, tt.Job.Namespace, tt.Job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, tt.Job.TaskGroups, out.TaskGroups)
})
}
}
func TestJobEndpoint_Register_InvalidNamespace(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.Namespace = "foo"
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt the registration, expecting a nonexistent-namespace error
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), "nonexistent namespace") {
t.Fatalf("expected namespace error: %v", err)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("expected no job")
}
}
func TestJobEndpoint_Register_Payload(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request with a job that carries a Payload, which is
// rejected on submission
job := mock.Job()
job.Payload = []byte{0x1}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil {
t.Fatalf("expected a validation error")
}
if !strings.Contains(err.Error(), "payload") {
t.Fatalf("expected a payload error but got: %v", err)
}
}
func TestJobEndpoint_Register_Existing(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Update the job definition
job2 := mock.Job()
job2.Priority = 100
job2.ID = job.ID
req.Job = job2
// Attempt update
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.ModifyIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
if out.Priority != 100 {
t.Fatalf("expected update")
}
if out.Version != 1 {
t.Fatalf("expected update")
}
// Lookup the evaluation
eval, err := state.EvalByID(ws, resp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != resp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
if eval.Priority != job2.Priority {
t.Fatalf("bad: %#v", eval)
}
if eval.Type != job2.Type {
t.Fatalf("bad: %#v", eval)
}
if eval.TriggeredBy != structs.EvalTriggerJobRegister {
t.Fatalf("bad: %#v", eval)
}
if eval.JobID != job2.ID {
t.Fatalf("bad: %#v", eval)
}
if eval.JobModifyIndex != resp.JobModifyIndex {
t.Fatalf("bad: %#v", eval)
}
if eval.Status != structs.EvalStatusPending {
t.Fatalf("bad: %#v", eval)
}
if eval.CreateTime == 0 {
t.Fatalf("eval CreateTime is unset: %#v", eval)
}
if eval.ModifyTime == 0 {
t.Fatalf("eval ModifyTime is unset: %#v", eval)
}
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check to ensure the job version didn't get bumped because we submitted
// the same job
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.Version != 1 {
t.Fatalf("expected no update; got %v; diff %v", out.Version, pretty.Diff(job2, out))
}
}
func TestJobEndpoint_Register_Periodic(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request for a periodic job.
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
serviceName := out.TaskGroups[0].Tasks[0].Services[0].Name
expectedServiceName := "web-frontend"
if serviceName != expectedServiceName {
t.Fatalf("Expected Service Name: %s, Actual: %s", expectedServiceName, serviceName)
}
if resp.EvalID != "" {
t.Fatalf("Register created an eval for a periodic job")
}
}
func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request for a parameterized job.
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
if resp.EvalID != "" {
t.Fatalf("Register created an eval for a parameterized job")
}
}
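// TestJobEndpoint_Register_Dispatched asserts that a job with Dispatched set
// to true cannot be submitted via Job.Register.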
func TestJobEndpoint_Register_Dispatched(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request with a job with 'Dispatched' set to true
job := mock.Job()
job.Dispatched = true
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.Error(err)
require.Contains(err.Error(), "job can't be submitted with 'Dispatched'")
}
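// TestJobEndpoint_Register_EnforceIndex asserts that registration honors the
// EnforceIndex/JobModifyIndex check-and-set semantics.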
func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request, enforcing an incorrect index
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
EnforceIndex: true,
JobModifyIndex: 100, // Not registered yet so not possible
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
t.Fatalf("expected enforcement error")
}
// Create the register request, enforcing that the job is new
req = &structs.JobRegisterRequest{
Job: job,
EnforceIndex: true,
JobModifyIndex: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
curIndex := resp.JobModifyIndex
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
// Reregister, enforcing that the job is new
req = &structs.JobRegisterRequest{
Job: job,
EnforceIndex: true,
JobModifyIndex: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
t.Fatalf("expected enforcement error")
}
// Reregister, enforcing an incorrect index
req = &structs.JobRegisterRequest{
Job: job,
EnforceIndex: true,
JobModifyIndex: curIndex - 1,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), RegisterEnforceIndexErrPrefix) {
t.Fatalf("expected enforcement error")
}
// Reregister, enforcing the correct index
job.Priority = job.Priority + 1
req = &structs.JobRegisterRequest{
Job: job,
EnforceIndex: true,
JobModifyIndex: curIndex,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.Priority != job.Priority {
t.Fatalf("priority mis-match")
}
}
// TestJobEndpoint_Register_Vault_Disabled asserts that submitting a job that
// uses Vault when Vault is *disabled* results in an error.
func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
f := false
c.VaultConfig.Enabled = &f
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request with a job asking for a vault policy
job := mock.Job()
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{"foo"},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), "Vault not enabled") {
t.Fatalf("expected Vault not enabled error: %v", err)
}
}
// TestJobEndpoint_Register_Vault_AllowUnauthenticated asserts submitting a job
// with a Vault policy but without a Vault token *succeeds* if
// allow_unauthenticated=true.
func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault and allow unauthenticated job submissions
tr := true
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &tr
// Replace the Vault Client on the server
s1.vault = &TestVaultClient{}
// Create the register request with a job asking for a vault policy
job := mock.Job()
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{"foo"},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err != nil {
t.Fatalf("bad: %v", err)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
}
// TestJobEndpoint_Register_Vault_OverrideConstraint asserts that job
// submitters can specify their own Vault constraint to override the
// automatically injected one.
func TestJobEndpoint_Register_Vault_OverrideConstraint(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault and allow unauthenticated job submissions
tr := true
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &tr
// Replace the Vault Client on the server
s1.vault = &TestVaultClient{}
// Create the register request with a job asking for a vault policy
job := mock.Job()
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{"foo"},
ChangeMode: structs.VaultChangeModeRestart,
}
job.TaskGroups[0].Tasks[0].Constraints = []*structs.Constraint{
{
LTarget: "${attr.vault.version}",
Operand: "is_set",
},
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, resp.JobModifyIndex, out.CreateIndex)
// Assert constraint was not overridden by the server
outConstraints := out.TaskGroups[0].Tasks[0].Constraints
require.Len(t, outConstraints, 1)
require.True(t, job.TaskGroups[0].Tasks[0].Constraints[0].Equal(outConstraints[0]))
}
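// TestJobEndpoint_Register_Vault_NoToken asserts that a job using Vault is
// rejected when no Vault token is supplied and allow_unauthenticated is false.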
func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
s1.vault = &TestVaultClient{}
// Create the register request with a job asking for a vault policy but
// don't send a Vault token
job := mock.Job()
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{"foo"},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), "missing Vault token") {
t.Fatalf("expected Vault not enabled error: %v", err)
}
}
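// TestJobEndpoint_Register_Vault_Policies asserts that the submitted Vault
// token must allow the policies requested by the job, that root tokens bypass
// the check, and that the token is cleared before the job is stored.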
func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
tvc := &TestVaultClient{}
s1.vault = tvc
// Add four tokens: one that allows the requesting policy, one that does
// not, one with the root policy, and one that returns an error
policy := "foo"
badToken := uuid.Generate()
badPolicies := []string{"a", "b", "c"}
tvc.SetLookupTokenAllowedPolicies(badToken, badPolicies)
goodToken := uuid.Generate()
goodPolicies := []string{"foo", "bar", "baz"}
tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
rootToken := uuid.Generate()
rootPolicies := []string{"root"}
tvc.SetLookupTokenAllowedPolicies(rootToken, rootPolicies)
errToken := uuid.Generate()
expectedErr := fmt.Errorf("return errors from vault")
tvc.SetLookupTokenError(errToken, expectedErr)
// Create the register request with a job asking for a vault policy but
// send the bad Vault token
job := mock.Job()
job.VaultToken = badToken
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{policy},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(),
"doesn't allow access to the following policies: "+policy) {
t.Fatalf("expected permission denied error: %v", err)
}
// Use the err token
job.VaultToken = errToken
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if err == nil || !strings.Contains(err.Error(), expectedErr.Error()) {
t.Fatalf("expected permission denied error: %v", err)
}
// Use the good token
job.VaultToken = goodToken
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("bad: %v", err)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
if out.VaultToken != "" {
t.Fatalf("vault token not cleared")
}
// Check that implicit constraints were created for Vault and Consul.
constraints := out.TaskGroups[0].Constraints
if l := len(constraints); l != 2 {
t.Fatalf("Unexpected number of tests: %v", l)
}
require.ElementsMatch(t, constraints, []*structs.Constraint{consulServiceDiscoveryConstraint, vaultConstraint})
// Create the register request with another job asking for a vault policy but
// send the root Vault token
job2 := mock.Job()
job2.VaultToken = rootToken
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{policy},
ChangeMode: structs.VaultChangeModeRestart,
}
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("bad: %v", err)
}
// Check for the job in the FSM
out, err = state.JobByID(ws, job2.Namespace, job2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
if out.VaultToken != "" {
t.Fatalf("vault token not cleared")
}
}
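// TestJobEndpoint_Register_Vault_MultiNamespaces asserts that a job using a
// non-default Vault namespace is rejected when the server has no Enterprise
// features.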
func TestJobEndpoint_Register_Vault_MultiNamespaces(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
tvc := &TestVaultClient{}
s1.vault = tvc
goodToken := uuid.Generate()
goodPolicies := []string{"foo", "bar", "baz"}
tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
// Create the register request with a job asking for a vault policy in a
// non-default Vault namespace
job := mock.Job()
job.VaultToken = goodToken
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Namespace: "ns1",
Policies: []string{"foo"},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
// OSS or Ent check
if err != nil && s1.EnterpriseState.Features() == 0 {
// errors.Is cannot be used because the RPC call breaks error wrapping.
require.Contains(t, err.Error(), ErrMultipleNamespaces.Error())
} else {
require.NoError(t, err)
}
}
// TestJobEndpoint_Register_SemverConstraint asserts that semver ordering is
// used when evaluating semver constraints.
func TestJobEndpoint_Register_SemverConstraint(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.State()
// Create a job with a semver constraint
job := mock.Job()
job.Constraints = []*structs.Constraint{
{
LTarget: "${attr.vault.version}",
RTarget: ">= 0.6.1",
Operand: structs.ConstraintSemver,
},
}
job.TaskGroups[0].Count = 1
// Insert 2 Nodes, 1 matching the constraint, 1 not
node1 := mock.Node()
node1.Attributes["vault.version"] = "1.3.0-beta1+ent"
node1.ComputeClass()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node1))
node2 := mock.Node()
delete(node2.Attributes, "vault.version")
node2.ComputeClass()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 2, node2))
// Create the register request
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp))
require.NotZero(t, resp.Index)
// Wait for placements
allocReq := &structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
testutil.WaitForResult(func() (bool, error) {
resp := structs.JobAllocationsResponse{}
err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", allocReq, &resp)
if err != nil {
return false, err
}
if n := len(resp.Allocations); n != 1 {
return false, fmt.Errorf("expected 1 alloc, found %d", n)
}
alloc := resp.Allocations[0]
if alloc.NodeID != node1.ID {
return false, fmt.Errorf("expected alloc to be one node=%q but found node=%q",
node1.ID, alloc.NodeID)
}
return true, nil
}, func(waitErr error) {
evals, err := state.EvalsByJob(nil, structs.DefaultNamespace, job.ID)
require.NoError(t, err)
for i, e := range evals {
t.Logf("%d Eval: %s", i, pretty.Sprint(e))
}
require.NoError(t, waitErr)
})
}
// TestJobEndpoint_Register_EvalCreation asserts that job register creates an
// eval atomically with the registration
func TestJobEndpoint_Register_EvalCreation_Modern(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
t.Run("job registration always create evals", func(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
//// initial registration should create the job and a new eval
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
require.NotZero(t, resp.Index)
require.NotEmpty(t, resp.EvalID)
// Check for the job in the FSM
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, resp.JobModifyIndex, out.CreateIndex)
// Lookup the evaluation
eval, err := state.EvalByID(nil, resp.EvalID)
require.NoError(t, err)
require.NotNil(t, eval)
require.Equal(t, resp.EvalCreateIndex, eval.CreateIndex)
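// The eval should have been applied atomically with the job registration,
// so it must not appear in a standalone eval update raft entry.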
require.Nil(t, evalUpdateFromRaft(t, s1, eval.ID))
//// re-registration should create a new eval, but leave the job untouched
var resp2 structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp2)
require.NoError(t, err)
require.NotZero(t, resp2.Index)
require.NotEmpty(t, resp2.EvalID)
require.NotEqual(t, resp.EvalID, resp2.EvalID)
// Check for the job in the FSM
state = s1.fsm.State()
out, err = state.JobByID(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, resp2.JobModifyIndex, out.CreateIndex)
require.Equal(t, out.CreateIndex, out.JobModifyIndex)
// Lookup the evaluation
eval, err = state.EvalByID(nil, resp2.EvalID)
require.NoError(t, err)
require.NotNil(t, eval)
require.Equal(t, resp2.EvalCreateIndex, eval.CreateIndex)
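// Because the job itself is unchanged, this eval is applied through a
// standalone eval update raft entry and should be found there.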
raftEval := evalUpdateFromRaft(t, s1, eval.ID)
require.Equal(t, raftEval, eval)
//// an update should update the job and create a new eval
req.Job.TaskGroups[0].Name += "a"
var resp3 structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp3)
require.NoError(t, err)
require.NotZero(t, resp3.Index)
require.NotEmpty(t, resp3.EvalID)
require.NotEqual(t, resp.EvalID, resp3.EvalID)
// Check for the job in the FSM
state = s1.fsm.State()
out, err = state.JobByID(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, resp3.JobModifyIndex, out.JobModifyIndex)
// Lookup the evaluation
eval, err = state.EvalByID(nil, resp3.EvalID)
require.NoError(t, err)
require.NotNil(t, eval)
require.Equal(t, resp3.EvalCreateIndex, eval.CreateIndex)
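// The job update and its eval are again applied together, so no
// standalone eval update entry is expected.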
require.Nil(t, evalUpdateFromRaft(t, s1, eval.ID))
})
// Registering a periodic job shouldn't create an eval
t.Run("periodic jobs shouldn't create an eval", func(t *testing.T) {
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
require.NotZero(t, resp.Index)
require.Empty(t, resp.EvalID)
// Check for the job in the FSM
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, resp.JobModifyIndex, out.CreateIndex)
})
}
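// TestJobEndpoint_Register_ValidateMemoryMax asserts that jobs setting
// memory_max get a warning unless memory oversubscription is enabled in the
// scheduler configuration.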
func TestJobEndpoint_Register_ValidateMemoryMax(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
submitNewJob := func() *structs.JobRegisterResponse {
job := mock.Job()
job.TaskGroups[0].Tasks[0].Resources.MemoryMaxMB = 2000
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
return &resp
}
// try default case: Memory oversubscription is disabled
resp := submitNewJob()
require.Contains(t, resp.Warnings, "Memory oversubscription is not enabled")
// enable now and try again
s1.State().SchedulerSetConfig(100, &structs.SchedulerConfiguration{
MemoryOversubscriptionEnabled: true,
})
resp = submitNewJob()
require.Empty(t, resp.Warnings)
}
// evalUpdateFromRaft searches the raft log for an eval update entry containing
// the given eval and returns it, or nil if none is found
func evalUpdateFromRaft(t *testing.T, s *Server, evalID string) *structs.Evaluation {
var store raft.LogStore = s.raftInmem
if store == nil {
store = s.raftStore
}
require.NotNil(t, store)
li, _ := store.LastIndex()
for i, _ := store.FirstIndex(); i <= li; i++ {
var log raft.Log
err := store.GetLog(i, &log)
require.NoError(t, err)
if log.Type != raft.LogCommand {
continue
}
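// Raft command data is a one-byte message type followed by the
// msgpack-encoded request.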
if structs.MessageType(log.Data[0]) != structs.EvalUpdateRequestType {
continue
}
var req structs.EvalUpdateRequest
structs.Decode(log.Data[1:], &req)
require.NoError(t, err)
for _, eval := range req.Evals {
if eval.ID == evalID {
eval.CreateIndex = i
eval.ModifyIndex = i
return eval
}
}
}
return nil
}
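// TestJobEndpoint_Register_ACL_Namespace asserts that job registration is
// authorized against the namespace named in the request.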
func TestJobEndpoint_Register_ACL_Namespace(t *testing.T) {
ci.Parallel(t)
s1, _, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Policy with read on default namespace and write on non default
policy := &structs.ACLPolicy{
Name: fmt.Sprintf("policy-%s", uuid.Generate()),
Description: "Super cool policy!",
Rules: `
namespace "default" {
policy = "read"
}
namespace "test" {
policy = "write"
}
node {
policy = "read"
}
agent {
policy = "read"
}
`,
CreateIndex: 10,
ModifyIndex: 20,
}
policy.SetHash()
assert := assert.New(t)
// Upsert policy and token
token := mock.ACLToken()
token.Policies = []string{policy.Name}
err := s1.State().UpsertACLPolicies(structs.MsgTypeTestSetup, 100, []*structs.ACLPolicy{policy})
assert.Nil(err)
err = s1.State().UpsertACLTokens(structs.MsgTypeTestSetup, 110, []*structs.ACLToken{token})
assert.Nil(err)
// Upsert namespace
ns := mock.Namespace()
ns.Name = "test"
err = s1.fsm.State().UpsertNamespaces(1000, []*structs.Namespace{ns})
assert.Nil(err)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
}
req.AuthToken = token.SecretID
// Use token without write access to default namespace, expect failure
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
assert.NotNil(err, "expected permission denied")
req.Namespace = "test"
job.Namespace = "test"
// Use token with write access to the test namespace, expect success
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
assert.Nil(err, "unexpected err: %v", err)
assert.NotEqual(resp.Index, 0, "bad index: %d", resp.Index)
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
assert.Nil(err)
assert.NotNil(out, "expected job")
}
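// TestJobRegister_ACL_RejectedBySchedulerConfig asserts that the scheduler
// configuration's RejectJobRegistration flag blocks registration for all but
// management tokens.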
func TestJobRegister_ACL_RejectedBySchedulerConfig(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
submitJobToken := mock.CreatePolicyAndToken(t, s1.State(), 1001, "test-valid-write",
mock.NamespacePolicy(structs.DefaultNamespace, "write", nil)).
SecretID
cases := []struct {
name string
token string
rejectEnabled bool
errExpected string
}{
{
name: "reject disabled, with a submit token",
token: submitJobToken,
rejectEnabled: false,
},
{
name: "reject enabled, with a submit token",
token: submitJobToken,
rejectEnabled: true,
errExpected: structs.ErrJobRegistrationDisabled.Error(),
},
{
name: "reject enabled, without a token",
token: "",
rejectEnabled: true,
errExpected: structs.ErrPermissionDenied.Error(),
},
{
name: "reject enabled, with a management token",
token: root.SecretID,
rejectEnabled: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
req.AuthToken = tc.token
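// Apply the scheduler configuration for this case using the management token.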
cfgReq := &structs.SchedulerSetConfigRequest{
Config: structs.SchedulerConfiguration{
RejectJobRegistration: tc.rejectEnabled,
},
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
cfgReq.AuthToken = root.SecretID
err := msgpackrpc.CallWithCodec(codec, "Operator.SchedulerSetConfiguration",
cfgReq, &structs.SchedulerSetConfigurationResponse{},
)
require.NoError(t, err)
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
if tc.errExpected != "" {
require.Error(t, err, "expected error")
require.EqualError(t, err, tc.errExpected)
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
require.NoError(t, err)
require.Nil(t, out)
} else {
require.NoError(t, err, "unexpected error")
require.NotEqual(t, 0, resp.Index)
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
require.NoError(t, err)
require.NotNil(t, out)
require.Equal(t, job.TaskGroups, out.TaskGroups)
}
})
}
}
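// TestJobEndpoint_Revert asserts that a job can be reverted to a prior
// version, honoring EnforcePriorVersion, and that a revert bumps the version
// and creates an evaluation.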
func TestJobEndpoint_Revert(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the initial register request
job := mock.Job()
job.Priority = 100
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Reregister again to get another version
job2 := job.Copy()
job2.Priority = 1
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Create a revert request, enforcing an incorrect prior version
revertReq := &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
EnforcePriorVersion: pointer.Of(uint64(10)),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(), "enforcing version 10") {
t.Fatalf("expected enforcement error")
}
// Create a revert request targeting the current version, which should fail
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 1,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(), "current version") {
t.Fatalf("expected current version err: %v", err)
}
// Create a revert request, enforcing prior version 1
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
EnforcePriorVersion: pointer.Of(uint64(1)),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
t.Fatalf("bad created eval: %+v", resp)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
}
// Create revert request and don't enforce. We are at version 2 but it is
// the same as version 0
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
if resp.EvalID == "" || resp.EvalCreateIndex == 0 {
t.Fatalf("bad created eval: %+v", resp)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad job modify index: %d", resp.JobModifyIndex)
}
// Check that the job is at the correct version and that the eval was
// created
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.Priority != job.Priority {
t.Fatalf("priority mis-match")
}
if out.Version != 2 {
t.Fatalf("got version %d; want %d", out.Version, 2)
}
eout, err := state.EvalByID(ws, resp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eout == nil {
t.Fatalf("expected eval")
}
if eout.JobID != job.ID {
t.Fatalf("job id mis-match")
}
versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(versions) != 3 {
t.Fatalf("got %d versions; want %d", len(versions), 3)
}
}
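// TestJobEndpoint_Revert_Vault_NoToken asserts that reverting to a version
// that uses Vault requires a Vault token when allow_unauthenticated is false.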
func TestJobEndpoint_Revert_Vault_NoToken(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
tvc := &TestVaultClient{}
s1.vault = tvc
// Add a token that allows the requesting policy
policy := "foo"
goodToken := uuid.Generate()
goodPolicies := []string{"foo", "bar", "baz"}
tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
// Create the initial register request
job := mock.Job()
job.VaultToken = goodToken
job.Priority = 100
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{policy},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Reregister again to get another version
job2 := job.Copy()
job2.Priority = 1
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
revertReq := &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 1,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(), "current version") {
t.Fatalf("expected current version err: %v", err)
}
// Create a revert request, enforcing prior version 1
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
EnforcePriorVersion: pointer.Of(uint64(1)),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(), "missing Vault token") {
t.Fatalf("expected Vault not enabled error: %v", err)
}
}
// TestJobEndpoint_Revert_Vault_Policies asserts that job revert uses the
// revert request's Vault token when authorizing policies.
func TestJobEndpoint_Revert_Vault_Policies(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
tvc := &TestVaultClient{}
s1.vault = tvc
// Add tokens: one that does not allow the requesting policy, two that do
// (one for register and one for revert), a root token, and one that
// returns an error
policy := "foo"
badToken := uuid.Generate()
badPolicies := []string{"a", "b", "c"}
tvc.SetLookupTokenAllowedPolicies(badToken, badPolicies)
registerGoodToken := uuid.Generate()
goodPolicies := []string{"foo", "bar", "baz"}
tvc.SetLookupTokenAllowedPolicies(registerGoodToken, goodPolicies)
revertGoodToken := uuid.Generate()
revertGoodPolicies := []string{"foo", "bar_revert", "baz_revert"}
tvc.SetLookupTokenAllowedPolicies(revertGoodToken, revertGoodPolicies)
rootToken := uuid.Generate()
rootPolicies := []string{"root"}
tvc.SetLookupTokenAllowedPolicies(rootToken, rootPolicies)
errToken := uuid.Generate()
expectedErr := fmt.Errorf("return errors from vault")
tvc.SetLookupTokenError(errToken, expectedErr)
// Create the initial register request
job := mock.Job()
job.VaultToken = registerGoodToken
job.Priority = 100
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{policy},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Reregister again to get another version
job2 := job.Copy()
job2.Priority = 1
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Create the revert request with the bad Vault token
revertReq := &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
VaultToken: badToken,
}
// Fetch the response
err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(),
"doesn't allow access to the following policies: "+policy) {
t.Fatalf("expected permission denied error: %v", err)
}
// Use the err token
revertReq.VaultToken = errToken
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
if err == nil || !strings.Contains(err.Error(), expectedErr.Error()) {
t.Fatalf("expected permission denied error: %v", err)
}
// Use a good token
revertReq.VaultToken = revertGoodToken
if err := msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp); err != nil {
t.Fatalf("bad: %v", err)
}
}
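// TestJobEndpoint_Revert_ACL asserts that reverting a job requires the
// submit-job capability or a management token.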
func TestJobEndpoint_Revert_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
state := s1.fsm.State()
testutil.WaitForLeader(t, s1.RPC)
// Create the jobs
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job)
require.Nil(err)
job2 := job.Copy()
job2.Priority = 1
err = state.UpsertJob(structs.MsgTypeTestSetup, 400, nil, job2)
require.Nil(err)
// Create a revert request back to version 0
revertReq := &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt to fetch the response without a valid token
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
revertReq.AuthToken = invalidToken.SecretID
var invalidResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Fetch the response with a valid management token
revertReq.AuthToken = root.SecretID
var validResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &validResp)
require.Nil(err)
// Try with a valid non-management token
validToken := mock.CreatePolicyAndToken(t, state, 1003, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
revertReq.AuthToken = validToken.SecretID
var validResp2 structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Revert", revertReq, &validResp2)
require.Nil(err)
}
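// TestJobEndpoint_Stable asserts that Job.Stable marks a job version as stable.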
func TestJobEndpoint_Stable(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the initial register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Create stability request
stableReq := &structs.JobStabilityRequest{
JobID: job.ID,
JobVersion: 0,
Stable: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var stableResp structs.JobStabilityResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &stableResp); err != nil {
t.Fatalf("err: %v", err)
}
if stableResp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Check that the job is marked stable
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if !out.Stable {
t.Fatalf("Job is not marked stable")
}
}
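// TestJobEndpoint_Stable_ACL asserts that marking a job version stable
// requires the submit-job capability or a management token.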
func TestJobEndpoint_Stable_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
state := s1.fsm.State()
testutil.WaitForLeader(t, s1.RPC)
// Register the job
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
// Create stability request
stableReq := &structs.JobStabilityRequest{
JobID: job.ID,
JobVersion: 0,
Stable: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt the stability request without a token
var stableResp structs.JobStabilityResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &stableResp)
require.NotNil(err)
require.Contains("Permission denied", err.Error())
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
stableReq.AuthToken = invalidToken.SecretID
var invalidStableResp structs.JobStabilityResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &invalidStableResp)
require.NotNil(err)
require.Contains("Permission denied", err.Error())
// Attempt to fetch with a management token
stableReq.AuthToken = root.SecretID
var validStableResp structs.JobStabilityResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &validStableResp)
require.Nil(err)
// Attempt to fetch with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
stableReq.AuthToken = validToken.SecretID
var validStableResp2 structs.JobStabilityResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Stable", stableReq, &validStableResp2)
require.Nil(err)
// Check that the job is marked stable
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
2018-03-14 22:32:18 +00:00
require.Nil(err)
require.NotNil(out)
require.Equal(true, out.Stable)
}
func TestJobEndpoint_Evaluate(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Lookup the evaluation
state := s1.fsm.State()
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, resp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != resp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
if eval.Priority != job.Priority {
t.Fatalf("bad: %#v", eval)
}
if eval.Type != job.Type {
t.Fatalf("bad: %#v", eval)
}
if eval.TriggeredBy != structs.EvalTriggerJobRegister {
t.Fatalf("bad: %#v", eval)
}
if eval.JobID != job.ID {
t.Fatalf("bad: %#v", eval)
}
if eval.JobModifyIndex != resp.JobModifyIndex {
t.Fatalf("bad: %#v", eval)
}
if eval.Status != structs.EvalStatusPending {
t.Fatalf("bad: %#v", eval)
}
if eval.CreateTime == 0 {
t.Fatalf("eval CreateTime is unset: %#v", eval)
}
if eval.ModifyTime == 0 {
t.Fatalf("eval ModifyTime is unset: %#v", eval)
}
}
func TestJobEndpoint_ForceRescheduleEvaluate(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.Nil(err)
require.NotEqual(0, resp.Index)
state := s1.fsm.State()
job, err = state.JobByID(nil, structs.DefaultNamespace, job.ID)
require.Nil(err)
// Create a failed alloc
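// (its failed ClientStatus is what makes it a candidate for the forced
// reschedule asserted at the end of this test)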
alloc := mock.Alloc()
alloc.Job = job
alloc.JobID = job.ID
alloc.TaskGroup = job.TaskGroups[0].Name
alloc.Namespace = job.Namespace
alloc.ClientStatus = structs.AllocClientStatusFailed
err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, resp.Index+1, []*structs.Allocation{alloc})
require.Nil(err)
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
EvalOptions: structs.EvalOptions{ForceReschedule: true},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp)
require.Nil(err)
require.NotEqual(0, resp.Index)
// Lookup the evaluation
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, resp.EvalID)
require.Nil(err)
require.NotNil(eval)
require.Equal(eval.CreateIndex, resp.EvalCreateIndex)
require.Equal(eval.Priority, job.Priority)
require.Equal(eval.Type, job.Type)
require.Equal(eval.TriggeredBy, structs.EvalTriggerJobRegister)
require.Equal(eval.JobID, job.ID)
require.Equal(eval.JobModifyIndex, resp.JobModifyIndex)
require.Equal(eval.Status, structs.EvalStatusPending)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
// Lookup the alloc, verify DesiredTransition ForceReschedule
alloc, err = state.AllocByID(ws, alloc.ID)
require.NotNil(alloc)
require.Nil(err)
require.True(*alloc.DesiredTransition.ForceReschedule)
}
func TestJobEndpoint_Evaluate_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create the job
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job)
require.Nil(err)
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt to fetch the response without a token
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
reEval.AuthToken = invalidToken.SecretID
var invalidResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Fetch the response with a valid management token
reEval.AuthToken = root.SecretID
var validResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &validResp)
require.Nil(err)
// Fetch the response with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
reEval.AuthToken = validToken.SecretID
var validResp2 structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &validResp2)
require.Nil(err)
// Lookup the evaluation
ws := memdb.NewWatchSet()
eval, err := state.EvalByID(ws, validResp2.EvalID)
require.Nil(err)
require.NotNil(eval)
require.Equal(eval.CreateIndex, validResp2.EvalCreateIndex)
require.Equal(eval.Priority, job.Priority)
require.Equal(eval.Type, job.Type)
require.Equal(eval.TriggeredBy, structs.EvalTriggerJobRegister)
require.Equal(eval.JobID, job.ID)
require.Equal(eval.JobModifyIndex, validResp2.JobModifyIndex)
require.Equal(eval.Status, structs.EvalStatusPending)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
}
func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
t.Fatal("expect an err")
}
}
func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluate", reEval, &resp); err == nil {
t.Fatal("expect an err")
}
}
func TestJobEndpoint_Deregister(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register requests
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp))
// Deregister but don't purge
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2))
require.NotZero(resp2.Index)
// Check for the job in the FSM
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.Nil(err)
require.NotNil(out)
require.True(out.Stop)
// Lookup the evaluation
eval, err := state.EvalByID(nil, resp2.EvalID)
require.Nil(err)
require.NotNil(eval)
require.EqualValues(resp2.EvalCreateIndex, eval.CreateIndex)
require.Equal(job.Priority, eval.Priority)
require.Equal(job.Type, eval.Type)
require.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
require.Equal(job.ID, eval.JobID)
require.Equal(structs.EvalStatusPending, eval.Status)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
// Deregister and purge
dereg2 := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp3 structs.JobDeregisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg2, &resp3))
require.NotZero(resp3.Index)
// Check for the job in the FSM
out, err = state.JobByID(nil, job.Namespace, job.ID)
require.Nil(err)
require.Nil(out)
// Lookup the evaluation
eval, err = state.EvalByID(nil, resp3.EvalID)
require.Nil(err)
require.NotNil(eval)
require.EqualValues(resp3.EvalCreateIndex, eval.CreateIndex)
require.Equal(job.Priority, eval.Priority)
require.Equal(job.Type, eval.Type)
require.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
require.Equal(job.ID, eval.JobID)
require.Equal(structs.EvalStatusPending, eval.Status)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
}
func TestJobEndpoint_Deregister_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create and register a job
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)
require.Nil(err)
// Deregister and purge
req := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Expect failure for request without a token
var resp structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.AuthToken = invalidToken.SecretID
var invalidResp structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect success with a valid management token
req.AuthToken = root.SecretID
var validResp structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req, &validResp)
require.Nil(err)
require.NotEqual(validResp.Index, 0)
// Expect success with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
req.AuthToken = validToken.SecretID
// Check for the job in the FSM
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.Nil(err)
require.Nil(out)
2017-09-27 19:19:14 +00:00
// Lookup the evaluation
eval, err := state.EvalByID(nil, validResp.EvalID)
require.Nil(err)
require.NotNil(eval)
require.Equal(eval.CreateIndex, validResp.EvalCreateIndex)
require.Equal(eval.Priority, structs.JobDefaultPriority)
require.Equal(eval.Type, structs.JobTypeService)
require.Equal(eval.TriggeredBy, structs.EvalTriggerJobDeregister)
require.Equal(eval.JobID, job.ID)
require.Equal(eval.JobModifyIndex, validResp.JobModifyIndex)
require.Equal(eval.Status, structs.EvalStatusPending)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
// Deregistration is idempotent
var validResp2 structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", req, &validResp2)
must.NoError(t, err)
must.Eq(t, "", validResp2.EvalID)
}
func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Deregister
jobID := "foo"
dereg := &structs.JobDeregisterRequest{
JobID: jobID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp2 structs.JobDeregisterResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2))
must.Eq(t, 0, resp2.JobModifyIndex, must.Sprint("expected no modify index"))
// Lookup the evaluation
state := s1.fsm.State()
ws := memdb.NewWatchSet()
eval, err := state.EvalsByJob(ws, structs.DefaultNamespace, jobID)
must.NoError(t, err)
must.Nil(t, eval)
}
func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) {
ci.Parallel(t)
requireAssert := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.Canonicalize()
// Register the job.
requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}, &structs.JobRegisterResponse{}))
// Create the deregister request.
deregReq := &structs.JobDeregisterRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
EvalPriority: 99,
}
var deregResp structs.JobDeregisterResponse
requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Deregister", deregReq, &deregResp))
// Grab the eval from the state, and check its priority is as expected.
out, err := s1.fsm.State().EvalByID(nil, deregResp.EvalID)
requireAssert.NoError(err)
requireAssert.Equal(99, out.Priority)
}
func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.PeriodicJob()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Deregister
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("unexpected job")
}
if resp.EvalID != "" {
t.Fatalf("Deregister created an eval for a periodic job")
}
}
func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
2016-12-02 23:37:26 +00:00
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Deregister
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.JobModifyIndex == 0 {
t.Fatalf("bad index: %d", resp2.Index)
}
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != nil {
t.Fatalf("unexpected job")
}
if resp.EvalID != "" {
t.Fatalf("Deregister created an eval for a parameterized job")
}
}
// TestJobEndpoint_Deregister_EvalCreation asserts that job deregister creates
// an eval atomically with the deregistration
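// Job (de-)registration and the follow-up eval used to be applied to raft as
// two separate transactions, which could leave a job unprocessed if the eval
// write failed; the assertions below rely on both being committed in a single
// raft entry.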
func TestJobEndpoint_Deregister_EvalCreation(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
t.Run("job de-registration always create evals", func(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2)
require.NoError(t, err)
require.NotEmpty(t, resp2.EvalID)
state := s1.fsm.State()
eval, err := state.EvalByID(nil, resp2.EvalID)
require.Nil(t, err)
require.NotNil(t, eval)
require.EqualValues(t, resp2.EvalCreateIndex, eval.CreateIndex)
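// evalUpdateFromRaft is a helper defined elsewhere in this file; a nil
// result is taken to mean no separate eval-update entry was written to
// raft, i.e. the eval was committed together with the deregistration.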
require.Nil(t, evalUpdateFromRaft(t, s1, eval.ID))
})
// Deregistering a periodic job shouldn't create an eval
t.Run("periodic jobs shouldn't create an eval", func(t *testing.T) {
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
require.NoError(t, err)
require.NotZero(t, resp.Index)
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2)
require.NoError(t, err)
require.Empty(t, resp2.EvalID)
})
}
func TestJobEndpoint_Deregister_NoShutdownDelay(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register requests
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp0 structs.JobRegisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp0))
// Deregister but don't purge
dereg1 := &structs.JobDeregisterRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp1 structs.JobDeregisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg1, &resp1))
require.NotZero(resp1.Index)
// Check for the job in the FSM
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(out)
require.True(out.Stop)
// Lookup the evaluation
eval, err := state.EvalByID(nil, resp1.EvalID)
require.NoError(err)
require.NotNil(eval)
require.EqualValues(resp1.EvalCreateIndex, eval.CreateIndex)
require.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
// Lookup allocation transitions
var ws memdb.WatchSet
allocs, err := state.AllocsByJob(ws, job.Namespace, job.ID, true)
require.NoError(err)
for _, alloc := range allocs {
require.Nil(alloc.DesiredTransition)
}
// Deregister with no shutdown delay
dereg2 := &structs.JobDeregisterRequest{
JobID: job.ID,
NoShutdownDelay: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg2, &resp2))
require.NotZero(resp2.Index)
// Lookup the evaluation
eval, err = state.EvalByID(nil, resp2.EvalID)
require.NoError(err)
require.NotNil(eval)
require.EqualValues(resp2.EvalCreateIndex, eval.CreateIndex)
require.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
// Lookup allocation transitions
allocs, err = state.AllocsByJob(ws, job.Namespace, job.ID, true)
require.NoError(err)
for _, alloc := range allocs {
require.NotNil(alloc.DesiredTransition)
require.True(*(alloc.DesiredTransition.NoShutdownDelay))
}
}
func TestJobEndpoint_BatchDeregister(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register requests
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp))
job2 := mock.Job()
job2.Priority = 1
reg2 := &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job2.Namespace,
},
}
// Fetch the response
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", reg2, &resp))
// Deregister
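// Jobs maps namespaced job IDs to per-job deregister options: job is only
// stopped, while job2 is purged.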
dereg := &structs.JobBatchDeregisterRequest{
Jobs: map[structs.NamespacedID]*structs.JobDeregisterOptions{
{
ID: job.ID,
Namespace: job.Namespace,
}: {},
{
ID: job2.ID,
Namespace: job2.Namespace,
}: {
Purge: true,
},
},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobBatchDeregisterResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.BatchDeregister", dereg, &resp2))
require.NotZero(resp2.Index)
// Check for the job in the FSM
state := s1.fsm.State()
out, err := state.JobByID(nil, job.Namespace, job.ID)
require.Nil(err)
require.NotNil(out)
require.True(out.Stop)
out, err = state.JobByID(nil, job2.Namespace, job2.ID)
require.Nil(err)
require.Nil(out)
// Lookup the evaluation
for jobNS, evalID := range resp2.JobEvals {
expectedJob := job
if jobNS.ID != job.ID {
expectedJob = job2
}
eval, err := state.EvalByID(nil, evalID)
require.Nil(err)
require.NotNil(eval)
require.EqualValues(resp2.Index, eval.CreateIndex)
require.Equal(expectedJob.Priority, eval.Priority)
require.Equal(expectedJob.Type, eval.Type)
require.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
require.Equal(expectedJob.ID, eval.JobID)
require.Equal(structs.EvalStatusPending, eval.Status)
require.NotZero(eval.CreateTime)
require.NotZero(eval.ModifyTime)
}
}
func TestJobEndpoint_BatchDeregister_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create and register a job
job, job2 := mock.Job(), mock.Job()
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job2))
// Deregister
req := &structs.JobBatchDeregisterRequest{
Jobs: map[structs.NamespacedID]*structs.JobDeregisterOptions{
{
ID: job.ID,
Namespace: job.Namespace,
}: {},
{
2018-03-14 22:32:18 +00:00
ID: job2.ID,
Namespace: job2.Namespace,
}: {},
},
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
// Expect failure for request without a token
var resp structs.JobBatchDeregisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.BatchDeregister", req, &resp)
require.NotNil(err)
require.True(structs.IsErrPermissionDenied(err))
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.AuthToken = invalidToken.SecretID
var invalidResp structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.BatchDeregister", req, &invalidResp)
require.NotNil(err)
require.True(structs.IsErrPermissionDenied(err))
// Expect success with a valid management token
req.AuthToken = root.SecretID
var validResp structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.BatchDeregister", req, &validResp)
require.Nil(err)
require.NotEqual(validResp.Index, 0)
// Expect success with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
req.AuthToken = validToken.SecretID
var validResp2 structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.BatchDeregister", req, &validResp2)
require.Nil(err)
require.NotEqual(validResp2.Index, 0)
}
func TestJobEndpoint_Deregister_Priority(t *testing.T) {
ci.Parallel(t)
requireAssertion := require.New(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
fsmState := s1.fsm.State()
// Create a job with a custom priority and register it.
job := mock.Job()
job.Priority = 90
err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)
requireAssertion.Nil(err)
// Deregister.
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobDeregisterResponse
requireAssertion.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp))
requireAssertion.NotZero(resp.Index)
// Check for the job in the FSM which should not be there as it was purged.
out, err := fsmState.JobByID(nil, job.Namespace, job.ID)
requireAssertion.Nil(err)
requireAssertion.Nil(out)
// Lookup the evaluation
eval, err := fsmState.EvalByID(nil, resp.EvalID)
requireAssertion.Nil(err)
requireAssertion.NotNil(eval)
requireAssertion.EqualValues(resp.EvalCreateIndex, eval.CreateIndex)
requireAssertion.Equal(job.Priority, eval.Priority)
requireAssertion.Equal(job.Type, eval.Type)
requireAssertion.Equal(structs.EvalTriggerJobDeregister, eval.TriggeredBy)
requireAssertion.Equal(job.ID, eval.JobID)
requireAssertion.Equal(structs.EvalStatusPending, eval.Status)
requireAssertion.NotZero(eval.CreateTime)
requireAssertion.NotZero(eval.ModifyTime)
}
func TestJobEndpoint_GetJob(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
job.CreateIndex = resp.JobModifyIndex
job.ModifyIndex = resp.JobModifyIndex
job.JobModifyIndex = resp.JobModifyIndex
// Lookup the job
get := &structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.SingleJobResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
// Make a copy of the original job and change the service name so that we can
// do a deep equal with the response from the GetJob API
j := job
j.TaskGroups[0].Tasks[0].Services[0].Name = "web-frontend"
for tgix, tg := range j.TaskGroups {
for tidx, t := range tg.Tasks {
for sidx, service := range t.Services {
for cidx, check := range service.Checks {
check.Name = resp2.Job.TaskGroups[tgix].Tasks[tidx].Services[sidx].Checks[cidx].Name
}
}
}
}
// Clear the submit times
j.SubmitTime = 0
resp2.Job.SubmitTime = 0
if !reflect.DeepEqual(j, resp2.Job) {
t.Fatalf("bad: %#v %#v", job, resp2.Job)
}
// Lookup non-existing job
get.JobID = "foobarbaz"
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
if resp2.Job != nil {
t.Fatalf("unexpected job")
}
}
func TestJobEndpoint_GetJob_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create the job
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
// Lookup the job
get := &structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Looking up the job without a token should fail
var resp structs.SingleJobResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.SingleJobResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Looking up the job with a management token should succeed
get.AuthToken = root.SecretID
var validResp structs.SingleJobResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &validResp)
require.Nil(err)
require.Equal(job.ID, validResp.Job.ID)
// Looking up the job with a valid token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.SingleJobResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &validResp2)
require.Nil(err)
require.Equal(job.ID, validResp2.Job.ID)
}
func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the jobs
job1 := mock.Job()
job2 := mock.Job()
// Upsert a job we are not interested in first.
time.AfterFunc(100*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil {
t.Fatalf("err: %v", err)
}
})
// Upsert another job later which should trigger the watch.
time.AfterFunc(200*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, nil, job2); err != nil {
t.Fatalf("err: %v", err)
}
})
req := &structs.JobSpecificRequest{
JobID: job2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job2.Namespace,
MinQueryIndex: 150,
},
}
start := time.Now()
var resp structs.SingleJobResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if resp.Job == nil || resp.Job.ID != job2.ID {
t.Fatalf("bad: %#v", resp.Job)
}
// Job delete fires watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(300, job2.Namespace, job2.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 250
start = time.Now()
var resp2 structs.SingleJobResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
}
if resp2.Index != 300 {
t.Fatalf("Bad index: %d %d", resp2.Index, 300)
}
if resp2.Job != nil {
t.Fatalf("bad: %#v", resp2.Job)
}
}
func TestJobEndpoint_GetJobVersions(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.Priority = 88
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Register the job again to create another version
job.Priority = 100
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Lookup the job
get := &structs.JobVersionsRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var versionsResp structs.JobVersionsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
t.Fatalf("err: %v", err)
}
if versionsResp.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.Index)
}
// Make sure there are two job versions
versions := versionsResp.Versions
if l := len(versions); l != 2 {
t.Fatalf("Got %d versions; want 2", l)
}
if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 1 {
t.Fatalf("bad: %+v", v)
}
if v := versions[1]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
t.Fatalf("bad: %+v", v)
}
// Lookup non-existing job
get.JobID = "foobarbaz"
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
t.Fatalf("err: %v", err)
}
if versionsResp.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.Index)
}
if l := len(versionsResp.Versions); l != 0 {
t.Fatalf("unexpected versions: %d", l)
}
}
func TestJobEndpoint_GetJobSubmission(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
t.Cleanup(cleanupS1)
codec := rpcClient(t, s1)
testutil.WaitForLeaders(t, s1.RPC)
// create a job to register and make queries about
job := mock.Job()
registerRequest := &structs.JobRegisterRequest{
Job: job,
Submission: &structs.JobSubmission{
Source: "job \"my-job\" { group \"g1\" {} }",
Format: "hcl2",
VariableFlags: map[string]string{"one": "1"},
Variables: "two = 2",
},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// register the job a first time
var registerResponse structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, &registerResponse)
must.NoError(t, err)
indexV0 := registerResponse.Index
// register the job a second time, creating another version (v0, v1)
// with a new job source file and variables
job.Priority++ // trigger new version
registerRequest.Submission = &structs.JobSubmission{
Source: "job \"my-job\" { group \"g2\" {} }",
Format: "hcl2",
VariableFlags: map[string]string{"three": "3"},
Variables: "four = 4",
}
err = msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, &registerResponse)
must.NoError(t, err)
indexV1 := registerResponse.Index
// lookup the submission for v0
submissionRequestV0 := &structs.JobSubmissionRequest{
JobID: job.ID,
Version: 0,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var submissionResponse structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV0, &submissionResponse)
must.NoError(t, err)
sub := submissionResponse.Submission
must.StrContains(t, sub.Source, "g1")
must.Eq(t, "hcl2", sub.Format)
must.Eq(t, map[string]string{"one": "1"}, sub.VariableFlags)
must.Eq(t, "two = 2", sub.Variables)
must.Eq(t, job.Namespace, sub.Namespace)
must.Eq(t, indexV0, sub.JobModifyIndex)
// lookup the submission for v1
submissionRequestV1 := &structs.JobSubmissionRequest{
JobID: job.ID,
Version: 1,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var submissionResponseV1 structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV1, &submissionResponseV1)
must.NoError(t, err)
sub = submissionResponseV1.Submission
must.StrContains(t, sub.Source, "g2")
must.Eq(t, "hcl2", sub.Format)
must.Eq(t, map[string]string{"three": "3"}, sub.VariableFlags)
must.Eq(t, "four = 4", sub.Variables)
must.Eq(t, job.Namespace, sub.Namespace)
must.Eq(t, indexV1, sub.JobModifyIndex)
// lookup non-existent submission v2
submissionRequestV2 := &structs.JobSubmissionRequest{
JobID: job.ID,
Version: 2,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var submissionResponseV2 structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV2, &submissionResponseV2)
must.NoError(t, err)
must.Nil(t, submissionResponseV2.Submission)
// create a deregister request
deRegisterRequest := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true, // force gc
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var deRegisterResponse structs.JobDeregisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deregister", deRegisterRequest, &deRegisterResponse)
must.NoError(t, err)
// lookup the submission for v0 again
submissionRequestV0 = &structs.JobSubmissionRequest{
JobID: job.ID,
Version: 0,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// should no longer exist
var submissionResponseGone structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequestV0, &submissionResponseGone)
must.NoError(t, err)
must.Nil(t, submissionResponseGone.Submission, must.Sprintf("got sub: %#v", submissionResponseGone.Submission))
}
func TestJobEndpoint_GetJobSubmission_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
t.Cleanup(cleanupS1)
codec := rpcClient(t, s1)
testutil.WaitForLeaders(t, s1.RPC)
// create a namespace to upsert
namespaceUpsertRequest := &structs.NamespaceUpsertRequest{
Namespaces: []*structs.Namespace{{
Name: "hashicorp",
Description: "My Namespace",
}},
WriteRequest: structs.WriteRequest{
Region: "global",
AuthToken: root.SecretID,
},
}
var namespaceUpsertResponse structs.GenericResponse
err := msgpackrpc.CallWithCodec(codec, "Namespace.UpsertNamespaces", namespaceUpsertRequest, &namespaceUpsertResponse)
must.NoError(t, err)
// create a job to register and make queries about
job := mock.Job()
job.Namespace = "hashicorp"
registerRequest := &structs.JobRegisterRequest{
Job: job,
Submission: &structs.JobSubmission{
Source: "job \"my-job\" { group \"g1\" {} }",
Format: "hcl2",
VariableFlags: map[string]string{"one": "1"},
Variables: "two = 2",
},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
AuthToken: root.SecretID,
},
}
// register the job
var registerResponse structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", registerRequest, &registerResponse)
must.NoError(t, err)
// make a request with no token set
submissionRequest := &structs.JobSubmissionRequest{
JobID: job.ID,
Version: 0,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var submissionResponse structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequest, &submissionResponse)
must.ErrorContains(t, err, "Permission denied")
// make request with token set
submissionRequest.QueryOptions.AuthToken = root.SecretID
var submissionResponse2 structs.JobSubmissionResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobSubmission", submissionRequest, &submissionResponse2)
must.NoError(t, err)
}
func TestJobEndpoint_GetJobVersions_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create two versions of a job with different priorities
job := mock.Job()
job.Priority = 88
err := state.UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)
require.Nil(err)
job.Priority = 100
err = state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job)
require.Nil(err)
// Lookup the job
get := &structs.JobVersionsRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt to fetch without a token should fail
var resp structs.JobVersionsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.JobVersionsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect success for request with a valid management token
get.AuthToken = root.SecretID
var validResp structs.JobVersionsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &validResp)
require.Nil(err)
// Expect success for request with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.JobVersionsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &validResp2)
require.Nil(err)
// Make sure there are two job versions
versions := validResp2.Versions
require.Equal(2, len(versions))
require.Equal(versions[0].ID, job.ID)
require.Equal(versions[1].ID, job.ID)
}
func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
job.Priority = 88
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Register the job again to create another version
job.Priority = 90
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Register the job again to create another version
job.Priority = 100
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Lookup the job
get := &structs.JobVersionsRequest{
JobID: job.ID,
Diffs: true,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var versionsResp structs.JobVersionsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
t.Fatalf("err: %v", err)
}
if versionsResp.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", versionsResp.Index, resp.Index)
}
// Make sure there are two job versions
versions := versionsResp.Versions
if l := len(versions); l != 3 {
t.Fatalf("Got %d versions; want 3", l)
}
if v := versions[0]; v.Priority != 100 || v.ID != job.ID || v.Version != 2 {
t.Fatalf("bad: %+v", v)
}
if v := versions[1]; v.Priority != 90 || v.ID != job.ID || v.Version != 1 {
t.Fatalf("bad: %+v", v)
}
if v := versions[2]; v.Priority != 88 || v.ID != job.ID || v.Version != 0 {
t.Fatalf("bad: %+v", v)
}
// Ensure we got diffs
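// Diffs are returned newest-first: diffs[0] compares v1 to v2 (priority
// 90 -> 100) and diffs[1] compares v0 to v1 (priority 88 -> 90).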
diffs := versionsResp.Diffs
if l := len(diffs); l != 2 {
t.Fatalf("Got %d diffs; want 2", l)
}
d1 := diffs[0]
if len(d1.Fields) != 1 {
t.Fatalf("Got too many diffs: %#v", d1)
}
if d1.Fields[0].Name != "Priority" {
t.Fatalf("Got wrong field: %#v", d1)
}
if d1.Fields[0].Old != "90" && d1.Fields[0].New != "100" {
t.Fatalf("Got wrong field values: %#v", d1)
}
d2 := diffs[1]
if len(d2.Fields) != 1 {
t.Fatalf("Got too many diffs: %#v", d2)
}
if d2.Fields[0].Name != "Priority" {
t.Fatalf("Got wrong field: %#v", d2)
}
if d2.Fields[0].Old != "88" && d1.Fields[0].New != "90" {
t.Fatalf("Got wrong field values: %#v", d2)
}
}
func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the jobs
job1 := mock.Job()
job2 := mock.Job()
job3 := mock.Job()
job3.ID = job2.ID
job3.Priority = 1
// Upsert a job we are not interested in first.
time.AfterFunc(100*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil {
t.Fatalf("err: %v", err)
}
})
// Upsert another job later which should trigger the watch.
time.AfterFunc(200*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 200, nil, job2); err != nil {
t.Fatalf("err: %v", err)
}
})
req := &structs.JobVersionsRequest{
JobID: job2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job2.Namespace,
MinQueryIndex: 150,
},
}
start := time.Now()
var resp structs.JobVersionsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if len(resp.Versions) != 1 || resp.Versions[0].ID != job2.ID {
t.Fatalf("bad: %#v", resp.Versions)
}
// Upsert the job again which should trigger the watch.
time.AfterFunc(100*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 300, nil, job3); err != nil {
t.Fatalf("err: %v", err)
}
})
req2 := &structs.JobVersionsRequest{
JobID: job3.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job3.Namespace,
MinQueryIndex: 250,
},
}
var resp2 structs.JobVersionsResponse
start = time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", req2, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp2.Index != 300 {
t.Fatalf("Bad index: %d %d", resp.Index, 300)
}
if len(resp2.Versions) != 2 {
t.Fatalf("bad: %#v", resp2.Versions)
}
}
func TestJobEndpoint_GetJobSummary(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &resp); err != nil {
t.Fatalf("err: %v", err)
}
job.CreateIndex = resp.JobModifyIndex
job.ModifyIndex = resp.JobModifyIndex
job.JobModifyIndex = resp.JobModifyIndex
// Lookup the job summary
get := &structs.JobSummaryRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobSummaryResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != resp.JobModifyIndex {
t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
}
expectedJobSummary := structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: job.CreateIndex,
ModifyIndex: job.CreateIndex,
}
if !reflect.DeepEqual(resp2.JobSummary, &expectedJobSummary) {
t.Fatalf("expected: %v, actual: %v", expectedJobSummary, resp2.JobSummary)
}
}
func TestJobEndpoint_Summary_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the job
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
reg.AuthToken = root.SecretID
var err error
// Register the job with a valid token
var regResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Register", reg, &regResp)
require.Nil(err)
job.CreateIndex = regResp.JobModifyIndex
job.ModifyIndex = regResp.JobModifyIndex
job.JobModifyIndex = regResp.JobModifyIndex
req := &structs.JobSummaryRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Expect failure for request without a token
var resp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp)
require.NotNil(err)
expectedJobSummary := &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {},
},
Children: new(structs.JobChildrenSummary),
CreateIndex: job.CreateIndex,
ModifyIndex: job.ModifyIndex,
}
// Expect success when using a management token
req.AuthToken = root.SecretID
var mgmtResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &mgmtResp)
require.Nil(err)
require.Equal(expectedJobSummary, mgmtResp.JobSummary)
// Create the namespace policy and tokens
state := s1.fsm.State()
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
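// The list-jobs capability alone is not enough for Job.Summary, which is
// gated on read-job, so this token is expected to be rejected.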
req.AuthToken = invalidToken.SecretID
var invalidResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &invalidResp)
require.NotNil(err)
// Try with a valid token
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
req.AuthToken = validToken.SecretID
var authResp structs.JobSummaryResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &authResp)
require.Nil(err)
require.Equal(expectedJobSummary, authResp.JobSummary)
}
func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create a job and insert it
job1 := mock.Job()
time.AfterFunc(200*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job1); err != nil {
t.Fatalf("err: %v", err)
}
})
// Issue a blocking job summary request that the job upsert above should unblock
req := &structs.JobSummaryRequest{
JobID: job1.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job1.Namespace,
MinQueryIndex: 50,
},
}
var resp structs.JobSummaryResponse
start := time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
// Upsert an allocation for the job which should trigger the watch.
time.AfterFunc(200*time.Millisecond, func() {
alloc := mock.Alloc()
alloc.JobID = job1.ID
alloc.Job = job1
if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
})
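// The allocation write at index 200 updates the job summary, which should
// unblock the query registered with MinQueryIndex 199.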
req = &structs.JobSummaryRequest{
JobID: job1.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job1.Namespace,
MinQueryIndex: 199,
},
}
start = time.Now()
var resp1 structs.JobSummaryResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp1); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp1.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if resp1.JobSummary == nil {
t.Fatalf("bad: %#v", resp)
}
// Job delete fires watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(300, job1.Namespace, job1.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
req.QueryOptions.MinQueryIndex = 250
start = time.Now()
var resp2 structs.JobSummaryResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
}
if resp2.Index != 300 {
t.Fatalf("Bad index: %d %d", resp2.Index, 300)
}
if resp2.JobSummary != nil {
t.Fatalf("bad: %#v", resp2.JobSummary)
}
}
func TestJobEndpoint_ListJobs(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create a job and upsert it directly into state
job := mock.Job()
state := s1.fsm.State()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.NoError(t, err)
// Lookup the jobs
get := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
2015-09-06 19:18:45 +00:00
}
var resp2 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp2)
require.NoError(t, err)
require.Equal(t, uint64(1000), resp2.Index)
require.Len(t, resp2.Jobs, 1)
require.Equal(t, job.ID, resp2.Jobs[0].ID)
require.Equal(t, job.Namespace, resp2.Jobs[0].Namespace)
require.Nil(t, resp2.Jobs[0].Meta)
// Lookup the jobs by prefix
get = &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
Prefix: resp2.Jobs[0].ID[:4],
},
}
var resp3 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp3)
require.NoError(t, err)
require.Equal(t, uint64(1000), resp3.Index)
require.Len(t, resp3.Jobs, 1)
require.Equal(t, job.ID, resp3.Jobs[0].ID)
require.Equal(t, job.Namespace, resp3.Jobs[0].Namespace)
// Lookup jobs with a meta parameter
get = &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
Prefix: resp2.Jobs[0].ID[:4],
},
Fields: &structs.JobStubFields{
Meta: true,
},
}
var resp4 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp4)
require.NoError(t, err)
require.Equal(t, job.Meta["owner"], resp4.Jobs[0].Meta["owner"])
}
// TestJobEndpoint_ListJobs_AllNamespaces_OSS asserts that the server
// returns jobs from all namespaces when the wildcard namespace is used.
func TestJobEndpoint_ListJobs_AllNamespaces_OSS(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create a job and upsert it directly into state
job := mock.Job()
state := s1.fsm.State()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
if err != nil {
t.Fatalf("err: %v", err)
}
// Lookup the jobs
get := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: "*",
},
}
var resp2 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp2)
require.NoError(t, err)
require.Equal(t, uint64(1000), resp2.Index)
require.Len(t, resp2.Jobs, 1)
require.Equal(t, job.ID, resp2.Jobs[0].ID)
require.Equal(t, structs.DefaultNamespace, resp2.Jobs[0].Namespace)
// Lookup the jobs by prefix
get = &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: "*",
Prefix: resp2.Jobs[0].ID[:4],
},
}
var resp3 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp3)
require.NoError(t, err)
require.Equal(t, uint64(1000), resp3.Index)
require.Len(t, resp3.Jobs, 1)
require.Equal(t, job.ID, resp3.Jobs[0].ID)
require.Equal(t, structs.DefaultNamespace, resp3.Jobs[0].Namespace)
// Lookup the jobs with a non-matching prefix
get = &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: "*",
Prefix: "z" + resp2.Jobs[0].ID[:4],
},
}
var resp4 structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp4)
require.NoError(t, err)
require.Equal(t, uint64(1000), resp4.Index)
require.Empty(t, resp4.Jobs)
}
func TestJobEndpoint_ListJobs_WithACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
var err error
// Create a job and upsert it directly into state
job := mock.Job()
err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
req := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Expect failure for request without a token
var resp structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp)
require.NotNil(err)
// Expect success for request with a management token
var mgmtResp structs.JobListResponse
req.AuthToken = root.SecretID
err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &mgmtResp)
require.Nil(err)
require.Equal(1, len(mgmtResp.Jobs))
require.Equal(job.ID, mgmtResp.Jobs[0].ID)
// Expect failure for request with a token that has incorrect permissions
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
req.AuthToken = invalidToken.SecretID
var invalidResp structs.JobListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &invalidResp)
2018-03-14 22:32:18 +00:00
require.NotNil(err)
// Try with a valid token with correct permissions
validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
var validResp structs.JobListResponse
req.AuthToken = validToken.SecretID
err = msgpackrpc.CallWithCodec(codec, "Job.List", req, &validResp)
require.Nil(err)
require.Equal(1, len(validResp.Jobs))
require.Equal(job.ID, validResp.Jobs[0].ID)
}
func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the job
job := mock.Job()
// Upsert job triggers watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job); err != nil {
t.Fatalf("err: %v", err)
}
})
req := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
MinQueryIndex: 50,
},
}
start := time.Now()
var resp structs.JobListResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 100 {
t.Fatalf("Bad index: %d %d", resp.Index, 100)
}
if len(resp.Jobs) != 1 || resp.Jobs[0].ID != job.ID {
t.Fatalf("bad: %#v", resp)
}
// Job deletion triggers watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(200, job.Namespace, job.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
req.MinQueryIndex = 150
start = time.Now()
var resp2 structs.JobListResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
}
if resp2.Index != 200 {
t.Fatalf("Bad index: %d %d", resp2.Index, 200)
}
if len(resp2.Jobs) != 0 {
t.Fatalf("bad: %#v", resp2)
}
}
func TestJobEndpoint_ListJobs_PaginationFiltering(t *testing.T) {
ci.Parallel(t)
s1, _, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// create a set of jobs. these are in the order that the state store will
// return them from the iterator (sorted by key) for ease of writing tests
mocks := []struct {
name string
namespace string
status string
}{
{name: "job-01"}, // 0
{name: "job-02"}, // 1
{name: "job-03", namespace: "non-default"}, // 2
{name: "job-04"}, // 3
{name: "job-05", status: structs.JobStatusRunning}, // 4
{name: "job-06", status: structs.JobStatusRunning}, // 5
{}, // 6, missing job
{name: "job-08"}, // 7
{name: "job-03", namespace: "other"}, // 8, same name but in another namespace
}
state := s1.fsm.State()
require.NoError(t, state.UpsertNamespaces(999, []*structs.Namespace{{Name: "non-default"}, {Name: "other"}}))
for i, m := range mocks {
if m.name == "" {
continue
}
index := 1000 + uint64(i)
job := mock.Job()
job.ID = m.name
job.Name = m.name
job.Status = m.status
if m.namespace != "" { // defaults to "default"
job.Namespace = m.namespace
}
job.CreateIndex = index
require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, index, nil, job))
}
aclToken := mock.CreatePolicyAndToken(t, state, 1100, "test-valid-read",
mock.NamespacePolicy("*", "read", nil)).
SecretID
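// NextToken values are the state store's job sort key, "<namespace>.<job ID>";
// a follow-up request resumes at the first job whose key matches the token.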
cases := []struct {
name string
namespace string
prefix string
filter string
nextToken string
pageSize int32
expectedNextToken string
expectedIDs []string
expectedError string
}{
{
name: "test01 size-2 page-1 default NS",
pageSize: 2,
expectedNextToken: "default.job-04",
expectedIDs: []string{"job-01", "job-02"},
},
{
name: "test02 size-2 page-1 default NS with prefix",
prefix: "job",
pageSize: 2,
expectedNextToken: "default.job-04",
expectedIDs: []string{"job-01", "job-02"},
},
{
name: "test03 size-2 page-2 default NS",
pageSize: 2,
nextToken: "default.job-04",
expectedNextToken: "default.job-06",
expectedIDs: []string{"job-04", "job-05"},
},
{
name: "test04 size-2 page-2 default NS with prefix",
prefix: "job",
pageSize: 2,
nextToken: "default.job-04",
expectedNextToken: "default.job-06",
expectedIDs: []string{"job-04", "job-05"},
},
{
name: "test05 no valid results with filters and prefix",
prefix: "not-job",
pageSize: 2,
nextToken: "",
expectedIDs: []string{},
},
{
name: "test06 go-bexpr filter",
namespace: "*",
filter: `Name matches "job-0[123]"`,
expectedIDs: []string{"job-01", "job-02", "job-03", "job-03"},
},
{
name: "test07 go-bexpr filter with pagination",
namespace: "*",
filter: `Name matches "job-0[123]"`,
pageSize: 2,
expectedNextToken: "non-default.job-03",
expectedIDs: []string{"job-01", "job-02"},
},
{
name: "test08 go-bexpr filter in namespace",
namespace: "non-default",
filter: `Status == "pending"`,
expectedIDs: []string{"job-03"},
},
{
name: "test09 go-bexpr invalid expression",
filter: `NotValid`,
expectedError: "failed to read filter expression",
},
{
name: "test10 go-bexpr invalid field",
filter: `InvalidField == "value"`,
expectedError: "error finding value in datum",
},
{
name: "test11 missing index",
pageSize: 1,
nextToken: "default.job-07",
expectedIDs: []string{
"job-08",
},
},
{
name: "test12 same name but different NS",
namespace: "*",
pageSize: 1,
filter: `Name == "job-03"`,
expectedNextToken: "other.job-03",
expectedIDs: []string{
"job-03",
},
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
req := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: tc.namespace,
Prefix: tc.prefix,
Filter: tc.filter,
PerPage: tc.pageSize,
NextToken: tc.nextToken,
},
}
req.AuthToken = aclToken
var resp structs.JobListResponse
err := msgpackrpc.CallWithCodec(codec, "Job.List", req, &resp)
if tc.expectedError == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.expectedError)
return
}
gotIDs := []string{}
for _, job := range resp.Jobs {
gotIDs = append(gotIDs, job.ID)
}
require.Equal(t, tc.expectedIDs, gotIDs, "unexpected page of jobs")
require.Equal(t, tc.expectedNextToken, resp.QueryMeta.NextToken, "unexpected NextToken")
})
}
}
func TestJobEndpoint_Allocations(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create allocations for a job
alloc1 := mock.Alloc()
alloc2 := mock.Alloc()
alloc2.JobID = alloc1.JobID
state := s1.fsm.State()
state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Lookup allocations for the job
get := &structs.JobSpecificRequest{
JobID: alloc1.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc1.Job.Namespace,
},
}
var resp2 structs.JobAllocationsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 1000 {
t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
}
if len(resp2.Allocations) != 2 {
t.Fatalf("bad: %#v", resp2.Allocations)
}
}
func TestJobEndpoint_Allocations_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create allocations for a job
alloc1 := mock.Alloc()
alloc2 := mock.Alloc()
alloc2.JobID = alloc1.JobID
state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2})
require.Nil(err)
// Look up allocations for that job
get := &structs.JobSpecificRequest{
JobID: alloc1.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc1.Job.Namespace,
},
}
// Attempt to fetch the response without a token should fail
var resp structs.JobAllocationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token should fail
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.JobAllocationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with valid management token should succeed
get.AuthToken = root.SecretID
var validResp structs.JobAllocationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &validResp)
require.Nil(err)
// Attempt to fetch the response with a valid non-management token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.JobAllocationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &validResp2)
require.Nil(err)
require.Equal(2, len(validResp2.Allocations))
}
func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the allocations
alloc1 := mock.Alloc()
alloc2 := mock.Alloc()
alloc2.JobID = "job1"
state := s1.fsm.State()
// First upsert an unrelated alloc
time.AfterFunc(100*time.Millisecond, func() {
state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Upsert an alloc for the job we are interested in later
time.AfterFunc(200*time.Millisecond, func() {
state.UpsertJobSummary(199, mock.JobSummary(alloc2.JobID))
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{alloc2})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Lookup allocations for the job
get := &structs.JobSpecificRequest{
JobID: "job1",
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc1.Job.Namespace,
MinQueryIndex: 150,
},
}
var resp structs.JobAllocationsResponse
start := time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if len(resp.Allocations) != 1 || resp.Allocations[0].JobID != "job1" {
t.Fatalf("bad: %#v", resp.Allocations)
}
}
// TestJobEndpoint_Allocations_NoJobID asserts not setting a JobID in the
// request returns an error.
func TestJobEndpoint_Allocations_NoJobID(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
get := &structs.JobSpecificRequest{
JobID: "",
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobAllocationsResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp)
require.Error(t, err)
require.Contains(t, err.Error(), "missing job ID")
}
func TestJobEndpoint_Evaluations(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create evaluations for the same job
eval1 := mock.Eval()
eval2 := mock.Eval()
eval2.JobID = eval1.JobID
state := s1.fsm.State()
err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
// Lookup evaluations for the job
get := &structs.JobSpecificRequest{
JobID: eval1.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp2 structs.JobEvaluationsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
if resp2.Index != 1000 {
t.Fatalf("Bad index: %d %d", resp2.Index, 1000)
}
if len(resp2.Evaluations) != 2 {
t.Fatalf("bad: %#v", resp2.Evaluations)
}
}
func TestJobEndpoint_Evaluations_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create evaluations for the same job
eval1 := mock.Eval()
eval2 := mock.Eval()
eval2.JobID = eval1.JobID
err := state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval1, eval2})
require.Nil(err)
// Lookup evaluations for the job
get := &structs.JobSpecificRequest{
JobID: eval1.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
// Attempt to fetch without providing a token
var resp structs.JobEvaluationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.JobEvaluationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch with valid management token should succeed
get.AuthToken = root.SecretID
var validResp structs.JobEvaluationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &validResp)
require.Nil(err)
require.Equal(2, len(validResp.Evaluations))
// Attempt to fetch with valid token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1003, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.JobEvaluationsResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &validResp2)
require.Nil(err)
require.Equal(2, len(validResp2.Evaluations))
}
func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the evaluations
eval1 := mock.Eval()
eval2 := mock.Eval()
eval2.JobID = "job1"
state := s1.fsm.State()
// First upsert an unrelated eval
time.AfterFunc(100*time.Millisecond, func() {
err := state.UpsertEvals(structs.MsgTypeTestSetup, 100, []*structs.Evaluation{eval1})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Upsert an eval for the job we are interested in later
time.AfterFunc(200*time.Millisecond, func() {
err := state.UpsertEvals(structs.MsgTypeTestSetup, 200, []*structs.Evaluation{eval2})
if err != nil {
t.Fatalf("err: %v", err)
}
})
// Lookup evaluations for the job
get := &structs.JobSpecificRequest{
JobID: "job1",
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
MinQueryIndex: 150,
},
}
var resp structs.JobEvaluationsResponse
start := time.Now()
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
if resp.Index != 200 {
t.Fatalf("Bad index: %d %d", resp.Index, 200)
}
if len(resp.Evaluations) != 1 || resp.Evaluations[0].JobID != "job1" {
t.Fatalf("bad: %#v", resp.Evaluations)
}
}
func TestJobEndpoint_Deployments(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
require := require.New(t)
// Create a job and corresponding deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d1.JobID = j.ID
d2.JobID = j.ID
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob")
d1.JobCreateIndex = j.CreateIndex
d2.JobCreateIndex = j.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// Lookup deployments for the job
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.DeploymentListResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
require.EqualValues(1002, resp.Index, "response index")
require.Len(resp.Deployments, 2, "deployments for job")
}
func TestJobEndpoint_Deployments_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create a job and corresponding deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d1.JobID = j.ID
d2.JobID = j.ID
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob")
d1.JobCreateIndex = j.CreateIndex
d2.JobCreateIndex = j.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// Lookup deployments for the job
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
// Lookup with no token should fail
var resp structs.DeploymentListResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.DeploymentListResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Lookup with valid management token should succeed
get.AuthToken = root.SecretID
var validResp structs.DeploymentListResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &validResp), "RPC")
require.EqualValues(1002, validResp.Index, "response index")
require.Len(validResp.Deployments, 2, "deployments for job")
// Lookup with valid token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1005, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.DeploymentListResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &validResp2), "RPC")
require.EqualValues(1002, validResp2.Index, "response index")
require.Len(validResp2.Deployments, 2, "deployments for job")
}
func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
require := require.New(t)
// Create a job and deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d2.JobID = j.ID
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, nil, j), "UpsertJob")
d2.JobCreateIndex = j.CreateIndex
// First upsert an unrelated deployment
time.AfterFunc(100*time.Millisecond, func() {
require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
})
// Upsert a deployment for the job we are interested in later
time.AfterFunc(200*time.Millisecond, func() {
require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
})
// Lookup deployments for the job
get := &structs.JobSpecificRequest{
JobID: d2.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: d2.Namespace,
MinQueryIndex: 150,
},
}
var resp structs.DeploymentListResponse
start := time.Now()
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
require.EqualValues(200, resp.Index, "response index")
require.Len(resp.Deployments, 1, "deployments for job")
require.Equal(d2.ID, resp.Deployments[0].ID, "returned deployment")
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
}
func TestJobEndpoint_LatestDeployment(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
require := require.New(t)
// Create a job and deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d1.JobID = j.ID
d2.JobID = j.ID
d2.CreateIndex = d1.CreateIndex + 100
d2.ModifyIndex = d2.CreateIndex + 100
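// Give d2 higher indexes so it is reported as the job's latest deployment.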
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob")
d1.JobCreateIndex = j.CreateIndex
d2.JobCreateIndex = j.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// Lookup the latest deployment for the job
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.SingleDeploymentResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
require.EqualValues(1002, resp.Index, "response index")
require.NotNil(resp.Deployment, "want a deployment")
require.Equal(d2.ID, resp.Deployment.ID, "latest deployment for job")
}
func TestJobEndpoint_LatestDeployment_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create a job and deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d1.JobID = j.ID
d2.JobID = j.ID
d2.CreateIndex = d1.CreateIndex + 100
d2.ModifyIndex = d2.CreateIndex + 100
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j), "UpsertJob")
d1.JobCreateIndex = j.CreateIndex
d2.JobCreateIndex = j.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// Lookup the latest deployment for the job
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
// Attempt to fetch the response without a token should fail
var resp structs.SingleDeploymentResponse
err := msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token should fail
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
var invalidResp structs.SingleDeploymentResponse
err = msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Fetching latest deployment with a valid management token should succeed
get.AuthToken = root.SecretID
var validResp structs.SingleDeploymentResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &validResp), "RPC")
require.EqualValues(1002, validResp.Index, "response index")
require.NotNil(validResp.Deployment, "want a deployment")
require.Equal(d2.ID, validResp.Deployment.ID, "latest deployment for job")
// Fetching latest deployment with a valid token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1004, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
get.AuthToken = validToken.SecretID
var validResp2 structs.SingleDeploymentResponse
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &validResp2), "RPC")
require.EqualValues(1002, validResp2.Index, "response index")
require.NotNil(validResp2.Deployment, "want a deployment")
require.Equal(d2.ID, validResp2.Deployment.ID, "latest deployment for job")
}
func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
require := require.New(t)
// Create a job and deployments
j := mock.Job()
d1 := mock.Deployment()
d2 := mock.Deployment()
d2.JobID = j.ID
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 50, nil, j), "UpsertJob")
d2.JobCreateIndex = j.CreateIndex
// First upsert an unrelated deployment
time.AfterFunc(100*time.Millisecond, func() {
require.Nil(state.UpsertDeployment(100, d1), "UpsertDeployment")
})
// Upsert a deployment for the job we are interested in later
time.AfterFunc(200*time.Millisecond, func() {
require.Nil(state.UpsertDeployment(200, d2), "UpsertDeployment")
})
// Lookup the latest deployment for the job
get := &structs.JobSpecificRequest{
JobID: d2.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: d2.Namespace,
MinQueryIndex: 150,
},
}
var resp structs.SingleDeploymentResponse
start := time.Now()
require.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
require.EqualValues(200, resp.Index, "response index")
require.NotNil(resp.Deployment, "deployment for job")
require.Equal(d2.ID, resp.Deployment.ID, "returned deployment")
if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
}
}
func TestJobEndpoint_Plan_ACL(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create a plan request
job := mock.Job()
planReq := &structs.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Try without a token, expect failure
var planResp structs.JobPlanResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err == nil {
t.Fatalf("expected error")
}
// Try with a token
planReq.AuthToken = root.SecretID
if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
t.Fatalf("err: %v", err)
}
}
func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Create a plan request
planReq := &structs.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var planResp structs.JobPlanResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
if planResp.JobModifyIndex == 0 {
t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
}
if planResp.Annotations == nil {
t.Fatalf("no annotations")
}
if planResp.Diff == nil {
t.Fatalf("no diff")
}
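// The test server has no client nodes, so the planner cannot place
// allocations and is expected to report per-task-group failure metrics.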
if len(planResp.FailedTGAllocs) == 0 {
t.Fatalf("no failed task group alloc metrics")
}
}
func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("err: %v", err)
}
if resp.Index == 0 {
t.Fatalf("bad index: %d", resp.Index)
}
// Create a plan request
planReq := &structs.JobPlanRequest{
Job: job,
Diff: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var planResp structs.JobPlanResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp); err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
if planResp.JobModifyIndex == 0 {
t.Fatalf("bad cas: %d", planResp.JobModifyIndex)
}
if planResp.Annotations == nil {
t.Fatalf("no annotations")
}
if planResp.Diff != nil {
t.Fatalf("got diff")
}
if len(planResp.FailedTGAllocs) == 0 {
t.Fatalf("no failed task group alloc metrics")
}
}
// TestJobEndpoint_Plan_Scaling asserts that the plan endpoint handles
// jobs with scaling block
func TestJobEndpoint_Plan_Scaling(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create a plan request
job := mock.Job()
tg := job.TaskGroups[0]
tg.Tasks[0].Resources.MemoryMB = 999999999
scaling := &structs.ScalingPolicy{Min: 1, Max: 100, Type: structs.ScalingPolicyTypeHorizontal}
tg.Scaling = scaling.TargetTaskGroup(job, tg)
planReq := &structs.JobPlanRequest{
Job: job,
Diff: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Plan the job; the oversized memory request should yield failed task group allocation metrics
var planResp structs.JobPlanResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Plan", planReq, &planResp)
require.NoError(t, err)
require.NotEmpty(t, planResp.FailedTGAllocs)
require.Contains(t, planResp.FailedTGAllocs, tg.Name)
}
func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Enable vault
tr, f := true, false
s1.config.VaultConfig.Enabled = &tr
s1.config.VaultConfig.AllowUnauthenticated = &f
// Replace the Vault Client on the server
tvc := &TestVaultClient{}
s1.vault = tvc
policy := "foo"
goodToken := uuid.Generate()
goodPolicies := []string{"foo", "bar", "baz"}
tvc.SetLookupTokenAllowedPolicies(goodToken, goodPolicies)
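// With AllowUnauthenticated disabled, the submitted Vault token must exist
// and cover the requested policies; the stubbed client above reports
// goodToken as allowing foo, bar, and baz.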
// Create the register request with a job asking for a vault policy
job := mock.Job()
job.VaultToken = goodToken
job.TaskGroups[0].Tasks[0].Vault = &structs.Vault{
Policies: []string{policy},
ChangeMode: structs.VaultChangeModeRestart,
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("bad: %v", err)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
// Check that there is an implicit Vault and Consul constraint.
require.Len(t, out.TaskGroups[0].Constraints, 2)
require.ElementsMatch(t, out.TaskGroups[0].Constraints, []*structs.Constraint{
consulServiceDiscoveryConstraint, vaultConstraint,
})
}
func TestJobEndpoint_ValidateJob_ConsulConnect(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
validateJob := func(j *structs.Job) error {
req := &structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.JobValidateResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &resp); err != nil {
return err
}
if resp.Error != "" {
return errors.New(resp.Error)
}
if len(resp.ValidationErrors) != 0 {
return errors.New(strings.Join(resp.ValidationErrors, ","))
}
if resp.Warnings != "" {
return errors.New(resp.Warnings)
}
return nil
}
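// A group service with an empty sidecar_service block asks Nomad to inject
// a Connect sidecar proxy; group validation then requires exactly one
// network in bridge mode, as the subtests below assert.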
tgServices := []*structs.Service{
{
Name: "count-api",
PortLabel: "9001",
Connect: &structs.ConsulConnect{
SidecarService: &structs.ConsulSidecarService{},
},
},
}
t.Run("plain job", func(t *testing.T) {
j := mock.Job()
require.NoError(t, validateJob(j))
})
t.Run("valid consul connect", func(t *testing.T) {
j := mock.Job()
tg := j.TaskGroups[0]
tg.Services = tgServices
tg.Networks[0].Mode = "bridge"
err := validateJob(j)
require.NoError(t, err)
})
t.Run("consul connect but missing network", func(t *testing.T) {
j := mock.Job()
tg := j.TaskGroups[0]
tg.Services = tgServices
tg.Networks = nil
err := validateJob(j)
require.Error(t, err)
require.Contains(t, err.Error(), `Consul Connect sidecars require exactly 1 network`)
})
t.Run("consul connect but non bridge network", func(t *testing.T) {
j := mock.Job()
tg := j.TaskGroups[0]
tg.Services = tgServices
tg.Networks = structs.Networks{
{Mode: "host"},
}
err := validateJob(j)
require.Error(t, err)
require.Contains(t, err.Error(), `Consul Connect sidecar requires bridge network, found "host" in group "web"`)
})
}
func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request with a job asking for a template that sends a
// signal
job := mock.Job()
signal1 := "SIGUSR1"
signal2 := "SIGHUP"
job.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
{
SourcePath: "foo",
DestPath: "bar",
ChangeMode: structs.TemplateChangeModeSignal,
ChangeSignal: signal1,
},
{
SourcePath: "foo",
DestPath: "baz",
ChangeMode: structs.TemplateChangeModeSignal,
ChangeSignal: signal2,
},
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
t.Fatalf("bad: %v", err)
}
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != resp.JobModifyIndex {
t.Fatalf("index mis-match")
}
// Check that there is an implicit signal and Consul constraint.
require.Len(t, out.TaskGroups[0].Constraints, 2)
require.ElementsMatch(t, out.TaskGroups[0].Constraints, []*structs.Constraint{
getSignalConstraint([]string{signal1, signal2}), consulServiceDiscoveryConstraint},
)
}
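// validateJobUpdate is expected to reject updates that change a job's type
// or that toggle its periodic, parameterized, or dispatched nature.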
func TestJobEndpoint_ValidateJobUpdate(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
old := mock.Job()
new := mock.Job()
if err := validateJobUpdate(old, new); err != nil {
t.Errorf("expected update to be valid but got: %v", err)
}
new.Type = "batch"
if err := validateJobUpdate(old, new); err == nil {
t.Errorf("expected err when setting new job to a different type")
} else {
t.Log(err)
}
new = mock.Job()
new.Periodic = &structs.PeriodicConfig{Enabled: true}
if err := validateJobUpdate(old, new); err == nil {
t.Errorf("expected err when setting new job to periodic")
} else {
t.Log(err)
}
new = mock.Job()
new.ParameterizedJob = &structs.ParameterizedJobConfig{}
if err := validateJobUpdate(old, new); err == nil {
t.Errorf("expected err when setting new job to parameterized")
} else {
t.Log(err)
}
new = mock.Job()
new.Dispatched = true
require.Error(validateJobUpdate(old, new),
"expected err when setting new job to dispatched")
require.Error(validateJobUpdate(nil, new),
"expected err when setting new job to dispatched")
require.Error(validateJobUpdate(new, old),
"expected err when setting dispatched to false")
require.NoError(validateJobUpdate(nil, old))
}
func TestJobEndpoint_ValidateJobUpdate_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
job := mock.Job()
req := &structs.JobValidateRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt to update without providing a valid token
var resp structs.JobValidateResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &resp)
require.NotNil(err)
// Update with a valid token
req.AuthToken = root.SecretID
var validResp structs.JobValidateResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &validResp)
require.Nil(err)
require.Equal("", validResp.Error)
require.Equal("", validResp.Warnings)
}
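// TestJobEndpoint_ValidateJob_PriorityNotOk asserts that Job.Validate rejects
// job priorities outside the allowed range.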
func TestJobEndpoint_ValidateJob_PriorityNotOk(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
validateJob := func(j *structs.Job) error {
req := &structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.JobValidateResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Validate", req, &resp); err != nil {
return err
}
if resp.Error != "" {
return errors.New(resp.Error)
}
if len(resp.ValidationErrors) != 0 {
return errors.New(strings.Join(resp.ValidationErrors, ","))
}
if resp.Warnings != "" {
return errors.New(resp.Warnings)
}
return nil
}
t.Run("job with invalid min priority", func(t *testing.T) {
j := mock.Job()
j.Priority = -1
err := validateJob(j)
must.Error(t, err)
must.ErrorContains(t, err, "job priority must be between")
})
t.Run("job with invalid max priority", func(t *testing.T) {
j := mock.Job()
j.Priority = 101
err := validateJob(j)
must.Error(t, err)
must.ErrorContains(t, err, "job priority must be between")
})
}
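// TestJobEndpoint_Dispatch_ACL asserts that Job.Dispatch requires a
// management token or a token with the dispatch-job capability.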
func TestJobEndpoint_Dispatch_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create a parameterized job
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
err := state.UpsertJob(structs.MsgTypeTestSetup, 400, nil, job)
require.Nil(err)
req := &structs.JobDispatchRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Attempt to fetch the response without a token should fail
var resp structs.JobDispatchResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Dispatch", req, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Attempt to fetch the response with an invalid token should fail
invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
req.AuthToken = invalidToken.SecretID
var invalidResp structs.JobDispatchResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Dispatch", req, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Dispatch with a valid management token should succeed
req.AuthToken = root.SecretID
var validResp structs.JobDispatchResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Dispatch", req, &validResp)
require.Nil(err)
require.NotNil(validResp.EvalID)
require.NotNil(validResp.DispatchedJobID)
require.NotEqual(validResp.DispatchedJobID, "")
// Dispatch with a valid token should succeed
validToken := mock.CreatePolicyAndToken(t, state, 1003, "test-valid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityDispatchJob}))
req.AuthToken = validToken.SecretID
var validResp2 structs.JobDispatchResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Dispatch", req, &validResp2)
require.Nil(err)
require.NotNil(validResp2.EvalID)
require.NotNil(validResp2.DispatchedJobID)
require.NotEqual(validResp2.DispatchedJobID, "")
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, validResp2.DispatchedJobID)
require.Nil(err)
require.NotNil(out)
require.Equal(out.ParentID, job.ID)
// Look up the evaluation
eval, err := state.EvalByID(ws, validResp2.EvalID)
require.Nil(err)
require.NotNil(eval)
require.Equal(eval.CreateIndex, validResp2.EvalCreateIndex)
}
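// TestJobEndpoint_Dispatch exercises Job.Dispatch across payload and meta
// requirements, stopped and periodic parameterized jobs, and idempotency
// tokens.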
func TestJobEndpoint_Dispatch(t *testing.T) {
ci.Parallel(t)
// No requirements
d1 := mock.BatchJob()
d1.ParameterizedJob = &structs.ParameterizedJobConfig{}
// Require input data
d2 := mock.BatchJob()
d2.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadRequired,
}
// Disallow input data
d3 := mock.BatchJob()
d3.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: structs.DispatchPayloadForbidden,
}
// Require meta
d4 := mock.BatchJob()
d4.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaRequired: []string{"foo", "bar"},
}
// Optional meta
d5 := mock.BatchJob()
d5.ParameterizedJob = &structs.ParameterizedJobConfig{
MetaOptional: []string{"foo", "bar"},
}
// Periodic dispatch job
d6 := mock.PeriodicJob()
d6.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7 := mock.BatchJob()
d7.ParameterizedJob = &structs.ParameterizedJobConfig{}
d7.Stop = true
reqNoInputNoMeta := &structs.JobDispatchRequest{}
reqInputDataNoMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
}
reqNoInputDataMeta := &structs.JobDispatchRequest{
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqInputDataMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
},
}
reqBadMeta := &structs.JobDispatchRequest{
Payload: []byte("hello world"),
Meta: map[string]string{
"foo": "f1",
"bar": "f2",
"baz": "f3",
},
}
reqInputDataTooLarge := &structs.JobDispatchRequest{
Payload: make([]byte, DispatchPayloadSizeLimit+100),
}
type existingIdempotentChildJob struct {
isTerminal bool
}
type testCase struct {
name string
parameterizedJob *structs.Job
dispatchReq *structs.JobDispatchRequest
noEval bool
err bool
errStr string
idempotencyToken string
existingIdempotentJob *existingIdempotentChildJob
}
cases := []testCase{
{
name: "optional input data w/ data",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "optional input data w/o data",
parameterizedJob: d1,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "require input data w/ data",
parameterizedJob: d2,
dispatchReq: reqInputDataNoMeta,
err: false,
},
{
name: "require input data w/o data",
parameterizedJob: d2,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "not provided but required",
},
{
name: "disallow input data w/o data",
parameterizedJob: d3,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "disallow input data w/ data",
parameterizedJob: d3,
dispatchReq: reqInputDataNoMeta,
err: true,
errStr: "provided but forbidden",
},
{
name: "require meta w/ meta",
parameterizedJob: d4,
dispatchReq: reqInputDataMeta,
err: false,
},
{
name: "require meta w/o meta",
parameterizedJob: d4,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "did not provide required meta keys",
},
{
name: "optional meta w/ meta",
parameterizedJob: d5,
dispatchReq: reqNoInputDataMeta,
err: false,
},
{
name: "optional meta w/o meta",
parameterizedJob: d5,
dispatchReq: reqNoInputNoMeta,
err: false,
},
{
name: "optional meta w/ bad meta",
parameterizedJob: d5,
dispatchReq: reqBadMeta,
err: true,
errStr: "unpermitted metadata keys",
},
{
name: "optional input w/ too big of input",
parameterizedJob: d1,
dispatchReq: reqInputDataTooLarge,
err: true,
errStr: "Payload exceeds maximum size",
},
{
name: "periodic job dispatched, ensure no eval",
parameterizedJob: d6,
dispatchReq: reqNoInputNoMeta,
noEval: true,
},
{
name: "periodic job stopped, ensure error",
parameterizedJob: d7,
dispatchReq: reqNoInputNoMeta,
err: true,
errStr: "stopped",
},
{
name: "idempotency token, no existing child job",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
idempotencyToken: "foo",
existingIdempotentJob: nil,
},
{
name: "idempotency token, w/ existing non-terminal child job",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
idempotencyToken: "foo",
existingIdempotentJob: &existingIdempotentChildJob{
isTerminal: false,
},
noEval: true,
},
{
name: "idempotency token, w/ existing terminal job",
parameterizedJob: d1,
dispatchReq: reqInputDataNoMeta,
err: false,
idempotencyToken: "foo",
existingIdempotentJob: &existingIdempotentChildJob{
isTerminal: true,
},
noEval: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
// Create the register request
regReq := &structs.JobRegisterRequest{
Job: tc.parameterizedJob,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
},
}
// Fetch the response
var regResp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
// Now try to dispatch
tc.dispatchReq.JobID = tc.parameterizedJob.ID
tc.dispatchReq.WriteRequest = structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
IdempotencyToken: tc.idempotencyToken,
}
// Dispatch with the same request so a child job w/ the idempotency key exists
var initialIdempotentDispatchResp structs.JobDispatchResponse
if tc.existingIdempotentJob != nil {
if err := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &initialIdempotentDispatchResp); err != nil {
t.Fatalf("Unexpected error dispatching initial idempotent job: %v", err)
}
if tc.existingIdempotentJob.isTerminal {
eval, err := s1.State().EvalByID(nil, initialIdempotentDispatchResp.EvalID)
if err != nil {
t.Fatalf("Unexpected error fetching eval %v", err)
}
eval = eval.Copy()
eval.Status = structs.EvalStatusComplete
err = s1.State().UpsertEvals(structs.MsgTypeTestSetup, initialIdempotentDispatchResp.Index+1, []*structs.Evaluation{eval})
if err != nil {
t.Fatalf("Unexpected error completing eval %v", err)
}
}
}
var dispatchResp structs.JobDispatchResponse
dispatchErr := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &dispatchResp)
if dispatchErr == nil {
if tc.err {
t.Fatalf("Expected error: %v", dispatchErr)
}
// Check that we got an eval and job id back
switch dispatchResp.EvalID {
case "":
if !tc.noEval {
t.Fatalf("Bad response")
}
default:
if tc.noEval {
t.Fatalf("Got eval %q", dispatchResp.EvalID)
}
}
if dispatchResp.DispatchedJobID == "" {
t.Fatalf("Bad response")
}
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, tc.parameterizedJob.Namespace, dispatchResp.DispatchedJobID)
if err != nil {
t.Fatalf("err: %v", err)
}
if out == nil {
t.Fatalf("expected job")
}
if out.CreateIndex != dispatchResp.JobCreateIndex {
t.Fatalf("index mis-match")
}
if out.ParentID != tc.parameterizedJob.ID {
t.Fatalf("bad parent ID")
}
if !out.Dispatched {
t.Fatal("expected dispatched job")
}
if out.IsParameterized() {
t.Fatal("dispatched job should not be parameterized")
}
if out.ParameterizedJob == nil {
t.Fatal("parameter job config should exist")
}
// Check that the existing job is returned in the case of a supplied idempotency token
if tc.idempotencyToken != "" && tc.existingIdempotentJob != nil {
if dispatchResp.DispatchedJobID != initialIdempotentDispatchResp.DispatchedJobID {
t.Fatal("dispatched job id should match initial dispatch")
}
if dispatchResp.JobCreateIndex != initialIdempotentDispatchResp.JobCreateIndex {
t.Fatal("dispatched job create index should match initial dispatch")
}
}
if tc.noEval {
return
}
// Lookup the evaluation
eval, err := state.EvalByID(ws, dispatchResp.EvalID)
if err != nil {
t.Fatalf("err: %v", err)
}
if eval == nil {
t.Fatalf("expected eval")
}
if eval.CreateIndex != dispatchResp.EvalCreateIndex {
t.Fatalf("index mis-match")
}
} else {
if !tc.err {
t.Fatalf("Got unexpected error: %v", dispatchErr)
} else if !strings.Contains(dispatchErr.Error(), tc.errStr) {
t.Fatalf("Expected err to include %q; got %v", tc.errStr, dispatchErr)
}
}
})
}
}
// TestJobEndpoint_Dispatch_JobChildrenSummary asserts that the job summary is updated
// appropriately as the status of its dispatched (child) jobs changes.
func TestJobEndpoint_Dispatch_JobChildrenSummary(t *testing.T) {
ci.Parallel(t)
s1, cleanupS1 := TestServer(t, func(c *Config) {
c.NumSchedulers = 0 // Prevent automatic dequeue
})
defer cleanupS1()
state := s1.fsm.State()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
node := mock.Node()
require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node))
parameterizedJob := mock.BatchJob()
parameterizedJob.ParameterizedJob = &structs.ParameterizedJobConfig{}
// Create the register request
regReq := &structs.JobRegisterRequest{
Job: parameterizedJob,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: parameterizedJob.Namespace,
},
}
var regResp structs.JobRegisterResponse
require.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Register", regReq, &regResp))
jobChildren := func() *structs.JobChildrenSummary {
summary, err := state.JobSummaryByID(nil, parameterizedJob.Namespace, parameterizedJob.ID)
require.NoError(t, err)
return summary.Children
}
require.Equal(t, &structs.JobChildrenSummary{}, jobChildren())
// dispatch a child job
dispatchReq := &structs.JobDispatchRequest{
JobID: parameterizedJob.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: parameterizedJob.Namespace,
},
}
var dispatchResp structs.JobDispatchResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", dispatchReq, &dispatchResp)
require.NoError(t, err)
nextIdx := dispatchResp.Index + 1
require.Equal(t, &structs.JobChildrenSummary{Pending: 1}, jobChildren())
dispatchedJob, err := state.JobByID(nil, parameterizedJob.Namespace, dispatchResp.DispatchedJobID)
require.NoError(t, err)
require.NotNil(t, dispatchedJob)
dispatchedStatus := func() string {
job, err := state.JobByID(nil, dispatchedJob.Namespace, dispatchedJob.ID)
require.NoError(t, err)
require.NotNil(t, job)
return job.Status
}
// Let's start an alloc for the dispatch job and walk through states
// Note that job summary reports 1 running even when alloc is pending!
nextIdx++
alloc := mock.Alloc()
alloc.Job = dispatchedJob
alloc.JobID = dispatchedJob.ID
alloc.TaskGroup = dispatchedJob.TaskGroups[0].Name
alloc.Namespace = dispatchedJob.Namespace
alloc.ClientStatus = structs.AllocClientStatusPending
err = s1.State().UpsertAllocs(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{alloc})
require.NoError(t, err)
require.Equal(t, &structs.JobChildrenSummary{Running: 1}, jobChildren())
require.Equal(t, structs.JobStatusRunning, dispatchedStatus())
// mark the creation eval as completed
nextIdx++
eval, err := state.EvalByID(nil, dispatchResp.EvalID)
require.NoError(t, err)
eval = eval.Copy()
eval.Status = structs.EvalStatusComplete
require.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, nextIdx, []*structs.Evaluation{eval}))
updateAllocStatus := func(status string) {
nextIdx++
nalloc, err := state.AllocByID(nil, alloc.ID)
require.NoError(t, err)
nalloc = nalloc.Copy()
nalloc.ClientStatus = status
err = s1.State().UpdateAllocsFromClient(structs.MsgTypeTestSetup, nextIdx, []*structs.Allocation{nalloc})
require.NoError(t, err)
}
// job should remain running when alloc runs
updateAllocStatus(structs.AllocClientStatusRunning)
require.Equal(t, &structs.JobChildrenSummary{Running: 1}, jobChildren())
require.Equal(t, structs.JobStatusRunning, dispatchedStatus())
// job should be dead after alloc completes
updateAllocStatus(structs.AllocClientStatusComplete)
require.Equal(t, &structs.JobChildrenSummary{Dead: 1}, jobChildren())
require.Equal(t, structs.JobStatusDead, dispatchedStatus())
}
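// TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig asserts that
// Job.Dispatch honors the scheduler configuration's RejectJobRegistration
// flag, allowing only management tokens through when it is set.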
func TestJobEndpoint_Dispatch_ACL_RejectedBySchedulerConfig(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.NoError(t, err)
dispatch := &structs.JobDispatchRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
submitJobToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid-write",
mock.NamespacePolicy(structs.DefaultNamespace, "write", nil)).
SecretID
cases := []struct {
name string
token string
rejectEnabled bool
errExpected string
}{
{
name: "reject disabled, with a submit token",
token: submitJobToken,
rejectEnabled: false,
},
{
name: "reject enabled, with a submit token",
token: submitJobToken,
rejectEnabled: true,
errExpected: structs.ErrJobRegistrationDisabled.Error(),
},
{
name: "reject enabled, without a token",
token: "",
rejectEnabled: true,
errExpected: structs.ErrPermissionDenied.Error(),
},
{
name: "reject enabled, with a management token",
token: root.SecretID,
rejectEnabled: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cfgReq := &structs.SchedulerSetConfigRequest{
Config: structs.SchedulerConfiguration{
RejectJobRegistration: tc.rejectEnabled,
},
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
cfgReq.AuthToken = root.SecretID
err := msgpackrpc.CallWithCodec(codec, "Operator.SchedulerSetConfiguration",
cfgReq, &structs.SchedulerSetConfigurationResponse{},
)
require.NoError(t, err)
dispatch.AuthToken = tc.token
var resp structs.JobDispatchResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Dispatch", dispatch, &resp)
if tc.errExpected != "" {
require.Error(t, err, "expected error")
require.EqualError(t, err, tc.errExpected)
} else {
require.NoError(t, err, "unexpected error")
require.NotEqual(t, 0, resp.Index)
}
})
}
}
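// TestJobEndpoint_Scale asserts that a scale request with a count creates an
// evaluation and records a scaling event with the previous count.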
func TestJobEndpoint_Scale(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
originalCount := job.TaskGroups[0].Count
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
groupName := job.TaskGroups[0].Name
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: groupName,
},
Count: pointer.Of(int64(originalCount + 1)),
Message: "because of the load",
Meta: map[string]interface{}{
"metrics": map[string]string{
"1": "a",
"2": "b",
},
"other": "value",
},
PolicyOverride: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.NoError(err)
require.NotEmpty(resp.EvalID)
require.Greater(resp.EvalCreateIndex, resp.JobModifyIndex)
events, _, _ := state.ScalingEventsByJob(nil, job.Namespace, job.ID)
require.Equal(1, len(events[groupName]))
require.Equal(int64(originalCount), events[groupName][0].PreviousCount)
}
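// TestJobEndpoint_Scale_DeploymentBlocking asserts that scaling with a count
// is rejected while the job's latest deployment is still active.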
func TestJobEndpoint_Scale_DeploymentBlocking(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
type testCase struct {
latestDeploymentStatus string
}
cases := []string{
structs.DeploymentStatusSuccessful,
structs.DeploymentStatusPaused,
structs.DeploymentStatusRunning,
}
for _, tc := range cases {
// create a job with a deployment history
job := mock.Job()
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job), "UpsertJob")
d1 := mock.Deployment()
d1.Status = structs.DeploymentStatusCancelled
d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob
d1.JobID = job.ID
d1.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
d2 := mock.Deployment()
d2.Status = structs.DeploymentStatusSuccessful
d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful
d2.JobID = job.ID
d2.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// add the latest deployment for the test case
dLatest := mock.Deployment()
dLatest.Status = tc
dLatest.StatusDescription = "description does not matter for this test"
dLatest.JobID = job.ID
dLatest.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment")
// attempt to scale
originalCount := job.TaskGroups[0].Count
newCount := int64(originalCount + 1)
groupName := job.TaskGroups[0].Name
scalingMetadata := map[string]interface{}{
"meta": "data",
}
scalingMessage := "original reason for scaling"
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: groupName,
},
Meta: scalingMetadata,
Message: scalingMessage,
Count: pointer.Of(newCount),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
if dLatest.Active() {
// should fail
require.Error(err, "test case %q", tc)
require.Contains(err.Error(), "active deployment")
} else {
require.NoError(err, "test case %q", tc)
require.NotEmpty(resp.EvalID)
require.Greater(resp.EvalCreateIndex, resp.JobModifyIndex)
}
}
}
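// TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked asserts that
// scale requests without a count (informational events) are recorded even
// when the latest deployment is active.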
func TestJobEndpoint_Scale_InformationalEventsShouldNotBeBlocked(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
type testCase struct {
latestDeploymentStatus string
}
cases := []string{
structs.DeploymentStatusSuccessful,
structs.DeploymentStatusPaused,
structs.DeploymentStatusRunning,
}
for _, tc := range cases {
// create a job with a deployment history
job := mock.Job()
require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job), "UpsertJob")
d1 := mock.Deployment()
d1.Status = structs.DeploymentStatusCancelled
d1.StatusDescription = structs.DeploymentStatusDescriptionNewerJob
d1.JobID = job.ID
d1.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1001, d1), "UpsertDeployment")
d2 := mock.Deployment()
d2.Status = structs.DeploymentStatusSuccessful
d2.StatusDescription = structs.DeploymentStatusDescriptionSuccessful
d2.JobID = job.ID
d2.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1002, d2), "UpsertDeployment")
// add the latest deployment for the test case
dLatest := mock.Deployment()
dLatest.Status = tc
dLatest.StatusDescription = "description does not matter for this test"
dLatest.JobID = job.ID
dLatest.JobCreateIndex = job.CreateIndex
require.Nil(state.UpsertDeployment(1003, dLatest), "UpsertDeployment")
// register informational scaling event
groupName := job.TaskGroups[0].Name
scalingMetadata := map[string]interface{}{
"meta": "data",
}
scalingMessage := "original reason for scaling"
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: groupName,
},
Meta: scalingMetadata,
Message: scalingMessage,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.NoError(err, "test case %q", tc)
require.Empty(resp.EvalID)
events, _, _ := state.ScalingEventsByJob(nil, job.Namespace, job.ID)
require.Equal(1, len(events[groupName]))
latestEvent := events[groupName][0]
require.False(latestEvent.Error)
require.Nil(latestEvent.Count)
require.Equal(scalingMessage, latestEvent.Message)
require.Equal(scalingMetadata, latestEvent.Meta)
}
}
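// TestJobEndpoint_Scale_ACL asserts that Job.Scale is denied without a token
// and allowed with management, write, scale, submit-job, or scale-job
// permissions.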
func TestJobEndpoint_Scale_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: job.TaskGroups[0].Name,
},
Message: "because of the load",
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Scale without a token should fail
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
scale.AuthToken = invalidToken.SecretID
var invalidResp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &invalidResp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
type testCase struct {
authToken string
name string
}
cases := []testCase{
{
name: "mgmt token should succeed",
authToken: root.SecretID,
},
{
name: "write disposition should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-write",
mock.NamespacePolicy(structs.DefaultNamespace, "write", nil)).
SecretID,
},
{
name: "autoscaler disposition should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-autoscaler",
mock.NamespacePolicy(structs.DefaultNamespace, "scale", nil)).
SecretID,
},
{
name: "submit-job capability should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-submit-job",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob})).SecretID,
},
{
name: "scale-job capability should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-scale-job",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityScaleJob})).
SecretID,
},
}
for _, tc := range cases {
scale.AuthToken = tc.authToken
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.NoError(err, tc.name)
require.NotNil(resp.EvalID)
}
}
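// TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig asserts that Job.Scale
// honors the scheduler configuration's RejectJobRegistration flag, allowing
// only management tokens through when it is set.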
func TestJobEndpoint_Scale_ACL_RejectedBySchedulerConfig(t *testing.T) {
ci.Parallel(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.NoError(t, err)
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: job.TaskGroups[0].Name,
},
Message: "because of the load",
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
submitJobToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid-write",
mock.NamespacePolicy(structs.DefaultNamespace, "write", nil)).
SecretID
cases := []struct {
name string
token string
rejectEnabled bool
errExpected string
}{
{
name: "reject disabled, with a submit token",
token: submitJobToken,
rejectEnabled: false,
},
{
name: "reject enabled, with a submit token",
token: submitJobToken,
rejectEnabled: true,
errExpected: structs.ErrJobRegistrationDisabled.Error(),
},
{
name: "reject enabled, without a token",
token: "",
rejectEnabled: true,
errExpected: structs.ErrPermissionDenied.Error(),
},
{
name: "reject enabled, with a management token",
token: root.SecretID,
rejectEnabled: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
cfgReq := &structs.SchedulerSetConfigRequest{
Config: structs.SchedulerConfiguration{
RejectJobRegistration: tc.rejectEnabled,
},
WriteRequest: structs.WriteRequest{
Region: "global",
},
}
cfgReq.AuthToken = root.SecretID
err := msgpackrpc.CallWithCodec(codec, "Operator.SchedulerSetConfiguration",
cfgReq, &structs.SchedulerSetConfigurationResponse{},
)
require.NoError(t, err)
var resp structs.JobRegisterResponse
scale.AuthToken = tc.token
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
if tc.errExpected != "" {
require.Error(t, err, "expected error")
require.EqualError(t, err, tc.errExpected)
} else {
require.NoError(t, err, "unexpected error")
require.NotEqual(t, 0, resp.Index)
}
})
}
}
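// TestJobEndpoint_Scale_Invalid asserts that Job.Scale rejects requests for
// unknown jobs and requests that set both a count and the error flag.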
func TestJobEndpoint_Scale_Invalid(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
count := job.TaskGroups[0].Count
// check before job registration
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: job.TaskGroups[0].Name,
},
Count: pointer.Of(int64(count) + 1),
Message: "this should fail",
Meta: map[string]interface{}{
"metrics": map[string]string{
"1": "a",
"2": "b",
},
"other": "value",
},
PolicyOverride: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.Error(err)
require.Contains(err.Error(), "not found")
// register the job
err = state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
scale.Count = pointer.Of(int64(10))
scale.Message = "error message"
scale.Error = true
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.Error(err)
require.Contains(err.Error(), "should not contain count if error is true")
}
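// TestJobEndpoint_Scale_OutOfBounds asserts that Job.Scale rejects counts
// outside the group's scaling policy minimum and maximum.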
func TestJobEndpoint_Scale_OutOfBounds(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job, pol := mock.JobWithScalingPolicy()
pol.Min = 3
pol.Max = 10
job.TaskGroups[0].Count = 5
// register the job
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
var resp structs.JobRegisterResponse
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: job.TaskGroups[0].Name,
},
Count: pointer.Of(pol.Max + 1),
Message: "out of bounds",
PolicyOverride: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.Error(err)
require.Contains(err.Error(), "group count was greater than scaling policy maximum: 11 > 10")
scale.Count = pointer.Of(int64(2))
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.Error(err)
require.Contains(err.Error(), "group count was less than scaling policy minimum: 2 < 3")
}
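// TestJobEndpoint_Scale_NoEval asserts that a scale request without a count
// records a scaling event but does not create an evaluation.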
func TestJobEndpoint_Scale_NoEval(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
groupName := job.TaskGroups[0].Name
originalCount := job.TaskGroups[0].Count
var resp structs.JobRegisterResponse
err := msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}, &resp)
jobCreateIndex := resp.Index
require.NoError(err)
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: groupName,
},
Count: nil, // no count => no eval
Message: "something informative",
Meta: map[string]interface{}{
"metrics": map[string]string{
"1": "a",
"2": "b",
},
"other": "value",
},
PolicyOverride: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.NoError(err)
require.Empty(resp.EvalID)
require.Empty(resp.EvalCreateIndex)
jobEvents, eventsIndex, err := state.ScalingEventsByJob(nil, job.Namespace, job.ID)
require.NoError(err)
require.NotNil(jobEvents)
require.Len(jobEvents, 1)
require.Contains(jobEvents, groupName)
groupEvents := jobEvents[groupName]
require.Len(groupEvents, 1)
event := groupEvents[0]
require.Nil(event.EvalID)
require.Greater(eventsIndex, jobCreateIndex)
events, _, _ := state.ScalingEventsByJob(nil, job.Namespace, job.ID)
require.Equal(1, len(events[groupName]))
require.Equal(int64(originalCount), events[groupName][0].PreviousCount)
}
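// TestJobEndpoint_Scale_Priority asserts that the evaluation created by a
// scale request inherits the job's priority.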
func TestJobEndpoint_Scale_Priority(t *testing.T) {
ci.Parallel(t)
requireAssertion := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
fsmState := s1.fsm.State()
// Create a job and alter the priority.
job := mock.Job()
job.Priority = 90
originalCount := job.TaskGroups[0].Count
err := fsmState.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
requireAssertion.Nil(err)
groupName := job.TaskGroups[0].Name
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: groupName,
},
Count: pointer.Of(int64(originalCount + 1)),
Message: "scotty, we need more power",
PolicyOverride: false,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
requireAssertion.NoError(err)
requireAssertion.NotEmpty(resp.EvalID)
requireAssertion.Greater(resp.EvalCreateIndex, resp.JobModifyIndex)
// Check the evaluation priority matches the job priority.
eval, err := fsmState.EvalByID(nil, resp.EvalID)
requireAssertion.Nil(err)
requireAssertion.NotNil(eval)
requireAssertion.EqualValues(resp.EvalCreateIndex, eval.CreateIndex)
requireAssertion.Equal(job.Priority, eval.Priority)
requireAssertion.Equal(job.Type, eval.Type)
requireAssertion.Equal(structs.EvalTriggerScaling, eval.TriggeredBy)
requireAssertion.Equal(job.ID, eval.JobID)
requireAssertion.NotZero(eval.CreateTime)
requireAssertion.NotZero(eval.ModifyTime)
}
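// TestJobEndpoint_Scale_SystemJob asserts that system jobs cannot be scaled.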
func TestJobEndpoint_Scale_SystemJob(t *testing.T) {
ci.Parallel(t)
testServer, testServerCleanup := TestServer(t, nil)
defer testServerCleanup()
codec := rpcClient(t, testServer)
testutil.WaitForLeader(t, testServer.RPC)
state := testServer.fsm.State()
mockSystemJob := mock.SystemJob()
must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 10, nil, mockSystemJob))
scaleReq := &structs.JobScaleRequest{
JobID: mockSystemJob.ID,
Target: map[string]string{
structs.ScalingTargetGroup: mockSystemJob.TaskGroups[0].Name,
},
Count: pointer.Of(int64(13)),
WriteRequest: structs.WriteRequest{
Region: DefaultRegion,
Namespace: mockSystemJob.Namespace,
},
}
var resp structs.JobRegisterResponse
must.ErrorContains(t, msgpackrpc.CallWithCodec(codec, "Job.Scale", scaleReq, &resp),
`400,cannot scale jobs of type "system"`)
}
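// TestJobEndpoint_Scale_BatchJob asserts that batch jobs can be scaled and
// that the resulting evaluation carries the job's type.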
func TestJobEndpoint_Scale_BatchJob(t *testing.T) {
ci.Parallel(t)
testServer, testServerCleanup := TestServer(t, nil)
defer testServerCleanup()
codec := rpcClient(t, testServer)
testutil.WaitForLeader(t, testServer.RPC)
state := testServer.fsm.State()
mockBatchJob := mock.BatchJob()
must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 10, nil, mockBatchJob))
scaleReq := &structs.JobScaleRequest{
JobID: mockBatchJob.ID,
Target: map[string]string{
structs.ScalingTargetGroup: mockBatchJob.TaskGroups[0].Name,
},
Count: pointer.Of(int64(13)),
WriteRequest: structs.WriteRequest{
Region: DefaultRegion,
Namespace: mockBatchJob.Namespace,
},
}
var resp structs.JobRegisterResponse
must.NoError(t, msgpackrpc.CallWithCodec(codec, "Job.Scale", scaleReq, &resp))
// Pull the generated evaluation from state and ensure the detailed type
// matches the jobspec.
testFSMSnapshot, err := testServer.fsm.State().Snapshot()
must.NoError(t, err)
scaleEval, err := testFSMSnapshot.EvalByID(nil, resp.EvalID)
must.NoError(t, err)
must.Eq(t, resp.EvalID, scaleEval.ID)
must.Eq(t, mockBatchJob.ID, scaleEval.JobID)
must.Eq(t, mockBatchJob.Type, scaleEval.Type)
}
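// TestJobEndpoint_InvalidCount asserts that Job.Scale rejects a negative count.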
func TestJobEndpoint_InvalidCount(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
scale := &structs.JobScaleRequest{
JobID: job.ID,
Target: map[string]string{
structs.ScalingTargetGroup: job.TaskGroups[0].Name,
},
Count: pointer.Of(int64(-1)),
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.JobRegisterResponse
err = msgpackrpc.CallWithCodec(codec, "Job.Scale", scale, &resp)
require.Error(err)
}
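// TestJobEndpoint_GetScaleStatus asserts that Job.ScaleStatus aggregates
// placement, running, and health counts along with scaling events for the
// latest job version.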
func TestJobEndpoint_GetScaleStatus(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, cleanupS1 := TestServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
jobV1 := mock.Job()
// check before registration
// Fetch the scaling status
get := &structs.JobScaleStatusRequest{
JobID: jobV1.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: jobV1.Namespace,
},
}
var resp2 structs.JobScaleStatusResponse
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.ScaleStatus", get, &resp2))
require.Nil(resp2.JobScaleStatus)
// stopped (previous version)
require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, jobV1), "UpsertJob")
a0 := mock.Alloc()
a0.Job = jobV1
a0.Namespace = jobV1.Namespace
a0.JobID = jobV1.ID
a0.ClientStatus = structs.AllocClientStatusComplete
require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1010, []*structs.Allocation{a0}), "UpsertAllocs")
jobV2 := jobV1.Copy()
require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, 1100, nil, jobV2), "UpsertJob")
a1 := mock.Alloc()
a1.Job = jobV2
a1.Namespace = jobV2.Namespace
a1.JobID = jobV2.ID
a1.ClientStatus = structs.AllocClientStatusRunning
// healthy
a1.DeploymentStatus = &structs.AllocDeploymentStatus{
Healthy: pointer.Of(true),
}
a2 := mock.Alloc()
a2.Job = jobV2
a2.Namespace = jobV2.Namespace
a2.JobID = jobV2.ID
a2.ClientStatus = structs.AllocClientStatusPending
// unhealthy
a2.DeploymentStatus = &structs.AllocDeploymentStatus{
Healthy: pointer.Of(false),
}
a3 := mock.Alloc()
a3.Job = jobV2
a3.Namespace = jobV2.Namespace
a3.JobID = jobV2.ID
a3.ClientStatus = structs.AllocClientStatusRunning
// canary
a3.DeploymentStatus = &structs.AllocDeploymentStatus{
Healthy: pointer.Of(true),
Canary: true,
}
// no health
a4 := mock.Alloc()
a4.Job = jobV2
a4.Namespace = jobV2.Namespace
a4.JobID = jobV2.ID
a4.ClientStatus = structs.AllocClientStatusRunning
// upsert allocations
require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, 1110, []*structs.Allocation{a1, a2, a3, a4}), "UpsertAllocs")
event := &structs.ScalingEvent{
Time: time.Now().Unix(),
Count: pointer.Of(int64(5)),
Message: "message",
Error: false,
Meta: map[string]interface{}{
"a": "b",
},
EvalID: nil,
}
require.NoError(state.UpsertScalingEvent(1003, &structs.ScalingEventRequest{
Namespace: jobV2.Namespace,
JobID: jobV2.ID,
TaskGroup: jobV2.TaskGroups[0].Name,
ScalingEvent: event,
}), "UpsertScalingEvent")
// check after job registration
require.NoError(msgpackrpc.CallWithCodec(codec, "Job.ScaleStatus", get, &resp2))
require.NotNil(resp2.JobScaleStatus)
expectedStatus := structs.JobScaleStatus{
JobID: jobV2.ID,
Namespace: jobV2.Namespace,
JobCreateIndex: jobV2.CreateIndex,
JobModifyIndex: a1.CreateIndex,
JobStopped: jobV2.Stop,
TaskGroups: map[string]*structs.TaskGroupScaleStatus{
jobV2.TaskGroups[0].Name: {
Desired: jobV2.TaskGroups[0].Count,
Placed: 3,
Running: 2,
Healthy: 1,
Unhealthy: 1,
Events: []*structs.ScalingEvent{event},
},
},
}
require.True(reflect.DeepEqual(*resp2.JobScaleStatus, expectedStatus))
}
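// TestJobEndpoint_GetScaleStatus_ACL asserts that Job.ScaleStatus is denied
// without a token and allowed with management, read, write, scale, read-job,
// or read-job-scaling permissions.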
func TestJobEndpoint_GetScaleStatus_ACL(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
s1, root, cleanupS1 := TestACLServer(t, nil)
defer cleanupS1()
codec := rpcClient(t, s1)
testutil.WaitForLeader(t, s1.RPC)
state := s1.fsm.State()
// Create the job
job := mock.Job()
err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)
require.Nil(err)
// Get the job scale status
get := &structs.JobScaleStatusRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
// Get without a token should fail
var resp structs.JobScaleStatusResponse
err = msgpackrpc.CallWithCodec(codec, "Job.ScaleStatus", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
// Expect failure for request with an invalid token
invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityListJobs}))
get.AuthToken = invalidToken.SecretID
err = msgpackrpc.CallWithCodec(codec, "Job.ScaleStatus", get, &resp)
require.NotNil(err)
require.Contains(err.Error(), "Permission denied")
type testCase struct {
authToken string
name string
}
cases := []testCase{
{
name: "mgmt token should succeed",
authToken: root.SecretID,
},
{
name: "read disposition should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-read",
mock.NamespacePolicy(structs.DefaultNamespace, "read", nil)).
SecretID,
},
{
name: "write disposition should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-write",
mock.NamespacePolicy(structs.DefaultNamespace, "write", nil)).
SecretID,
},
{
name: "autoscaler disposition should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-autoscaler",
mock.NamespacePolicy(structs.DefaultNamespace, "scale", nil)).
SecretID,
},
{
name: "read-job capability should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-read-job",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob})).SecretID,
},
{
name: "read-job-scaling capability should succeed",
authToken: mock.CreatePolicyAndToken(t, state, 1005, "test-valid-read-job-scaling",
mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJobScaling})).
SecretID,
},
}
for _, tc := range cases {
get.AuthToken = tc.authToken
var validResp structs.JobScaleStatusResponse
err = msgpackrpc.CallWithCodec(codec, "Job.ScaleStatus", get, &validResp)
require.NoError(err, tc.name)
require.NotNil(validResp.JobScaleStatus)
}
}
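// TestJob_GetServiceRegistrations tests the job service registrations RPC
// across ACL-disabled and ACL-enabled servers with various tokens.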
func TestJob_GetServiceRegistrations(t *testing.T) {
ci.Parallel(t)
// correctSetupFn is a helper that sets up a job and a service registration
// which can then be queried.
correctSetupFn := func(s *Server) (error, string, *structs.ServiceRegistration) {
// Generate and upsert a job.
job := mock.Job()
err := s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job)
if err != nil {
return err, "", nil
}
// Generate services. Set the jobID on the first service so this
// matches the job now held in state.
services := mock.ServiceRegistrations()
services[0].JobID = job.ID
err = s.State().UpsertServiceRegistrations(structs.MsgTypeTestSetup, 20, services)
return err, job.ID, services[0]
}
testCases := []struct {
serverFn func(t *testing.T) (*Server, *structs.ACLToken, func())
testFn func(t *testing.T, s *Server, token *structs.ACLToken)
name string
}{
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
server, cleanup := TestServer(t, nil)
return server, nil, cleanup
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.EqualValues(t, uint64(20), serviceRegResp.Index)
require.ElementsMatch(t, serviceRegResp.Services, []*structs.ServiceRegistration{service})
},
name: "ACLs disabled job found with regs",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
server, cleanup := TestServer(t, nil)
return server, nil, cleanup
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
// Generate and upsert our services.
services := mock.ServiceRegistrations()
require.NoError(t, s.State().UpsertServiceRegistrations(structs.MsgTypeTestSetup, 20, services))
// Perform a lookup on the first service using the job ID. This
// job does not exist within the Nomad state meaning the
// service is orphaned or the caller used an incorrect job ID.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: services[0].JobID,
QueryOptions: structs.QueryOptions{
Namespace: services[0].Namespace,
Region: s.Region(),
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err := msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.Nil(t, serviceRegResp.Services)
},
name: "ACLs disabled job not found",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
server, cleanup := TestServer(t, nil)
return server, nil, cleanup
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
// Generate and upsert a job.
job := mock.Job()
require.NoError(t, s.State().UpsertJob(structs.MsgTypeTestSetup, 10, nil, job))
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Namespace: job.Namespace,
Region: s.Region(),
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err := msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.ElementsMatch(t, serviceRegResp.Services, []*structs.ServiceRegistration{})
},
name: "ACLs disabled job found without regs",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
return TestACLServer(t, nil)
},
testFn: func(t *testing.T, s *Server, token *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
AuthToken: token.SecretID,
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.ElementsMatch(t, serviceRegResp.Services, []*structs.ServiceRegistration{service})
},
name: "ACLs enabled use management token",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
return TestACLServer(t, nil)
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Create a policy and grab the auth token.
authToken := mock.CreatePolicyAndToken(t, s.State(), 30, "test-node-get-service-reg",
mock.NamespacePolicy(service.Namespace, "", []string{acl.NamespaceCapabilityReadJob})).SecretID
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
AuthToken: authToken,
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.ElementsMatch(t, serviceRegResp.Services, []*structs.ServiceRegistration{service})
},
name: "ACLs enabled use read-job namespace capability token",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
return TestACLServer(t, nil)
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Create a policy and grab the auth token.
authToken := mock.CreatePolicyAndToken(t, s.State(), 30, "test-node-get-service-reg",
mock.NamespacePolicy(service.Namespace, "read", nil)).SecretID
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
AuthToken: authToken,
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.NoError(t, err)
require.ElementsMatch(t, serviceRegResp.Services, []*structs.ServiceRegistration{service})
},
name: "ACLs enabled use read namespace policy token",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
return TestACLServer(t, nil)
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Create a policy and grab the auth token.
authToken := mock.CreatePolicyAndToken(t, s.State(), 30, "test-node-get-service-reg",
mock.NamespacePolicy("ohno", "read", nil)).SecretID
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
AuthToken: authToken,
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.Error(t, err)
require.Contains(t, err.Error(), "Permission denied")
require.Empty(t, serviceRegResp.Services)
},
name: "ACLs enabled use read incorrect namespace policy token",
},
{
serverFn: func(t *testing.T) (*Server, *structs.ACLToken, func()) {
return TestACLServer(t, nil)
},
testFn: func(t *testing.T, s *Server, _ *structs.ACLToken) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
err, jobID, service := correctSetupFn(s)
require.NoError(t, err)
// Create a policy and grab the auth token.
authToken := mock.CreatePolicyAndToken(t, s.State(), 30, "test-node-get-service-reg",
mock.NamespacePolicy(service.Namespace, "", []string{acl.NamespaceCapabilityReadScalingPolicy})).SecretID
// Perform a lookup and test the response.
serviceRegReq := &structs.JobServiceRegistrationsRequest{
JobID: jobID,
QueryOptions: structs.QueryOptions{
Namespace: service.Namespace,
Region: s.Region(),
AuthToken: authToken,
},
}
var serviceRegResp structs.JobServiceRegistrationsResponse
err = msgpackrpc.CallWithCodec(codec, structs.JobServiceRegistrationsRPCMethod, serviceRegReq, &serviceRegResp)
require.Error(t, err)
require.Contains(t, err.Error(), "Permission denied")
require.Empty(t, serviceRegResp.Services)
},
name: "ACLs enabled use incorrect capability",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
server, aclToken, cleanup := tc.serverFn(t)
defer cleanup()
tc.testFn(t, server, aclToken)
})
}
}