open-nomad/command/agent/job_endpoint_test.go

package agent
import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
"time"
"github.com/golang/snappy"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
api "github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
)
func TestHTTP_JobsList(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
for i := 0; i < 3; i++ {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/jobs", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
// Check the job
j := obj.([]*structs.JobListStub)
if len(j) != 3 {
t.Fatalf("bad: %#v", j)
}
})
}
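// TestHTTP_PrefixJobsList asserts that listing jobs with a prefix query
// parameter returns only the jobs whose IDs match that prefix.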
func TestHTTP_PrefixJobsList(t *testing.T) {
ids := []string{
"aaaaaaaa-e8f7-fd38-c855-ab94ceb89706",
"aabbbbbb-e8f7-fd38-c855-ab94ceb89706",
"aabbcccc-e8f7-fd38-c855-ab94ceb89706",
}
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
for i := 0; i < 3; i++ {
// Create the job
job := mock.Job()
job.ID = ids[i]
job.TaskGroups[0].Count = 1
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/jobs?prefix=aabb", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
// Check the job
j := obj.([]*structs.JobListStub)
if len(j) != 2 {
t.Fatalf("bad: %#v", j)
}
})
}
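// TestHTTP_JobsList_AllNamespaces_OSS asserts that listing jobs with
// namespace=* returns jobs from the default namespace in an OSS build.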
func TestHTTP_JobsList_AllNamespaces_OSS(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
for i := 0; i < 3; i++ {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
err := s.Agent.RPC("Job.Register", &args, &resp)
require.NoError(t, err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/jobs?namespace=*", nil)
require.NoError(t, err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
require.NoError(t, err)
// Check for the index
require.NotEmpty(t, respW.HeaderMap.Get("X-Nomad-Index"), "missing index")
require.Equal(t, "true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader")
require.NotEmpty(t, respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact")
// Check the job
j := obj.([]*structs.JobListStub)
require.Len(t, j, 3)
require.Equal(t, "default", j[0].Namespace)
})
}
func TestHTTP_JobsRegister(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/jobs", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dereg := obj.(structs.JobRegisterResponse)
if dereg.EvalID == "" {
t.Fatalf("bad: %v", dereg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
t.Fatalf("err: %v", err)
}
if getResp.Job == nil {
t.Fatalf("job does not exist")
}
})
}
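// TestHTTP_JobsRegister_IgnoresParentID asserts that a ParentID set in the
// submitted job is discarded on registration while the job and its
// evaluation are still created.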
func TestHTTP_JobsRegister_IgnoresParentID(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
parentID := "somebadparentid"
job.ParentID = &parentID
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/jobs", buf)
require.NoError(t, err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
require.NoError(t, err)
// Check the response
reg := obj.(structs.JobRegisterResponse)
require.NotEmpty(t, reg.EvalID)
// Check for the index
require.NotEmpty(t, respW.HeaderMap.Get("X-Nomad-Index"))
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
err = s.Agent.RPC("Job.GetJob", &getReq, &getResp)
require.NoError(t, err)
require.NotNil(t, getResp.Job)
require.Equal(t, *job.ID, getResp.Job.ID)
require.Empty(t, getResp.Job.ParentID)
// check the eval exists
evalReq := structs.EvalSpecificRequest{
EvalID: reg.EvalID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var evalResp structs.SingleEvalResponse
err = s.Agent.RPC("Eval.GetEval", &evalReq, &evalResp)
require.NoError(t, err)
require.NotNil(t, evalResp.Eval)
require.Equal(t, reg.EvalID, evalResp.Eval.ID)
})
}
// Test that ACL token is properly threaded through to the RPC endpoint
func TestHTTP_JobsRegister_ACL(t *testing.T) {
t.Parallel()
httpACLTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
Region: "global",
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/jobs", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
setToken(req, s.RootToken)
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
assert.NotNil(t, obj)
})
}
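// TestHTTP_JobsRegister_Defaulting asserts that a job registered without a
// priority is assigned the default priority of 50.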
func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
// Do not set its priority
job.Priority = nil
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/jobs", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobsRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dereg := obj.(structs.JobRegisterResponse)
if dereg.EvalID == "" {
t.Fatalf("bad: %v", dereg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
t.Fatalf("err: %v", err)
}
if getResp.Job == nil {
t.Fatalf("job does not exist")
}
if getResp.Job.Priority != 50 {
t.Fatalf("job didn't get defaulted")
}
})
}
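// TestHTTP_JobsParse asserts that the parse endpoint converts an HCL jobspec
// into an api.Job with the expected name and datacenters.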
func TestHTTP_JobsParse(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
buf := encodeReq(api.JobsParseRequest{JobHCL: mock.HCL()})
req, err := http.NewRequest("POST", "/v1/jobs/parse", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
obj, err := s.Server.JobsParseRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
if obj == nil {
t.Fatal("response should not be nil")
}
job := obj.(*api.Job)
expected := mock.Job()
if job.Name == nil || *job.Name != expected.Name {
t.Fatalf("job name is '%s', expected '%s'", *job.Name, expected.Name)
}
if job.Datacenters == nil ||
job.Datacenters[0] != expected.Datacenters[0] {
t.Fatalf("job datacenters is '%s', expected '%s'",
job.Datacenters[0], expected.Datacenters[0])
}
})
}
func TestHTTP_JobQuery(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
// Check the job
j := obj.(*structs.Job)
if j.ID != job.ID {
t.Fatalf("bad: %#v", j)
}
})
}
func TestHTTP_JobQuery_Payload(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
// Insert Payload compressed
expected := []byte("hello world")
compressed := snappy.Encode(nil, expected)
job.Payload = compressed
// Directly manipulate the state
state := s.Agent.server.State()
if err := state.UpsertJob(structs.MsgTypeTestSetup, 1000, job); err != nil {
t.Fatalf("Failed to upsert job: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
// Check the job
j := obj.(*structs.Job)
if j.ID != job.ID {
t.Fatalf("bad: %#v", j)
}
// Check the payload is decompressed
if !reflect.DeepEqual(j.Payload, expected) {
t.Fatalf("Payload not decompressed properly; got %#v; want %#v", j.Payload, expected)
}
})
}
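// TestHTTP_jobUpdate_systemScaling asserts that registering a system job
// whose task group carries a scaling policy is rejected with a 400 error.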
func TestHTTP_jobUpdate_systemScaling(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
job.Type = helper.StringToPtr("system")
job.TaskGroups[0].Scaling = &api.ScalingPolicy{Enabled: helper.BoolToPtr(true)}
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
Region: "global",
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
assert.Nil(t, obj)
assert.Equal(t, CodedError(400, "Task groups with job type system do not support scaling stanzas"), err)
})
}
func TestHTTP_JobUpdate(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
Region: "global",
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dereg := obj.(structs.JobRegisterResponse)
if dereg.EvalID == "" {
t.Fatalf("bad: %v", dereg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
t.Fatalf("err: %v", err)
}
if getResp.Job == nil {
t.Fatalf("job does not exist")
}
})
}
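// TestHTTP_JobUpdateRegion asserts how the effective region for a registered
// job is resolved from the API request region, the jobspec region, and the
// agent's configured region.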
func TestHTTP_JobUpdateRegion(t *testing.T) {
t.Parallel()
cases := []struct {
Name string
ConfigRegion string
APIRegion string
ExpectedRegion string
}{
{
Name: "api region takes precedence",
ConfigRegion: "not-global",
APIRegion: "north-america",
ExpectedRegion: "north-america",
},
{
Name: "config region is set",
ConfigRegion: "north-america",
APIRegion: "",
ExpectedRegion: "north-america",
},
{
Name: "api region is set",
ConfigRegion: "",
APIRegion: "north-america",
ExpectedRegion: "north-america",
},
{
Name: "defaults to node region global if no region is provided",
ConfigRegion: "",
APIRegion: "",
ExpectedRegion: "global",
},
{
Name: "defaults to node region not-global if no region is provided",
ConfigRegion: "",
APIRegion: "",
ExpectedRegion: "not-global",
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
httpTest(t, func(c *Config) { c.Region = tc.ExpectedRegion }, func(s *TestAgent) {
// Create the job
job := MockRegionalJob()
if tc.ConfigRegion == "" {
job.Region = nil
} else {
job.Region = &tc.ConfigRegion
}
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{
Namespace: api.DefaultNamespace,
Region: tc.APIRegion,
},
}
buf := encodeReq(args)
// Make the HTTP request
url := "/v1/job/" + *job.ID
req, err := http.NewRequest("PUT", url, buf)
require.NoError(t, err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
require.NoError(t, err)
// Check the response
dereg := obj.(structs.JobRegisterResponse)
require.NotEmpty(t, dereg.EvalID)
// Check for the index
require.NotEmpty(t, respW.HeaderMap.Get("X-Nomad-Index"), "missing index")
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{
Region: tc.ExpectedRegion,
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
err = s.Agent.RPC("Job.GetJob", &getReq, &getResp)
require.NoError(t, err)
require.NotNil(t, getResp.Job, "job does not exist")
require.Equal(t, tc.ExpectedRegion, getResp.Job.Region)
})
})
}
}
func TestHTTP_JobDelete(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request to do a soft delete
req, err := http.NewRequest("DELETE", "/v1/job/"+job.ID, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dereg := obj.(structs.JobDeregisterResponse)
if dereg.EvalID == "" {
t.Fatalf("bad: %v", dereg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the job is still queryable
getReq1 := structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp1 structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq1, &getResp1); err != nil {
t.Fatalf("err: %v", err)
}
if getResp1.Job == nil {
t.Fatalf("job doesn't exists")
}
if !getResp1.Job.Stop {
t.Fatalf("job should be marked as stop")
}
// Make the HTTP request to do a purge delete
req2, err := http.NewRequest("DELETE", "/v1/job/"+job.ID+"?purge=true", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW.Flush()
// Make the request
obj, err = s.Server.JobSpecificRequest(respW, req2)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dereg = obj.(structs.JobDeregisterResponse)
if dereg.EvalID == "" {
t.Fatalf("bad: %v", dereg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the job is gone
getReq2 := structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp2 structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq2, &getResp2); err != nil {
t.Fatalf("err: %v", err)
}
if getResp2.Job != nil {
t.Fatalf("job still exists")
}
})
}
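// TestHTTP_Job_ScaleTaskGroup asserts that a scaling request increases the
// target task group's count and returns an evaluation.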
func TestHTTP_Job_ScaleTaskGroup(t *testing.T) {
t.Parallel()
require := require.New(t)
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
require.NoError(s.Agent.RPC("Job.Register", &args, &resp))
newCount := job.TaskGroups[0].Count + 1
scaleReq := &api.ScalingRequest{
Count: helper.Int64ToPtr(int64(newCount)),
Message: "testing",
Target: map[string]string{
"Job": job.ID,
"Group": job.TaskGroups[0].Name,
},
}
buf := encodeReq(scaleReq)
// Make the HTTP request to scale the job group
req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/scale", buf)
require.NoError(err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
require.NoError(err)
// Check the response
resp = obj.(structs.JobRegisterResponse)
require.NotEmpty(resp.EvalID)
// Check for the index
require.NotEmpty(respW.Header().Get("X-Nomad-Index"))
// Check that the group count was changed
getReq := structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
err = s.Agent.RPC("Job.GetJob", &getReq, &getResp)
require.NoError(err)
require.NotNil(getResp.Job)
require.Equal(newCount, getResp.Job.TaskGroups[0].Count)
})
}
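// TestHTTP_Job_ScaleStatus asserts that the scale status endpoint reports the
// desired count for each task group of a job.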
func TestHTTP_Job_ScaleStatus(t *testing.T) {
t.Parallel()
require := require.New(t)
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request to read the job's scale status
req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/scale", nil)
require.NoError(err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
require.NoError(err)
// Check the response
status := obj.(*structs.JobScaleStatus)
require.NotEmpty(resp.EvalID)
require.Equal(job.TaskGroups[0].Count, status.TaskGroups[job.TaskGroups[0].Name].Desired)
// Check for the index
require.NotEmpty(respW.Header().Get("X-Nomad-Index"))
})
}
func TestHTTP_JobForceEvaluate(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
reg := obj.(structs.JobRegisterResponse)
if reg.EvalID == "" {
t.Fatalf("bad: %v", reg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
})
}
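// TestHTTP_JobEvaluate_ForceReschedule asserts that an evaluation can be
// forced with the ForceReschedule option set in the request body.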
func TestHTTP_JobEvaluate_ForceReschedule(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
jobEvalReq := api.JobEvaluateRequest{
JobID: job.ID,
EvalOptions: api.EvalOptions{
ForceReschedule: true,
},
}
buf := encodeReq(jobEvalReq)
// Make the HTTP request
req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
reg := obj.(structs.JobRegisterResponse)
if reg.EvalID == "" {
t.Fatalf("bad: %v", reg)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
})
}
func TestHTTP_JobEvaluations(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/evaluations", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
evals := obj.([]*structs.Evaluation)
// Can be multiple evals, use the last one, since they are in order
idx := len(evals) - 1
if len(evals) < 0 || evals[idx].ID != resp.EvalID {
t.Fatalf("bad: %v", evals)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
})
}
func TestHTTP_JobAllocations(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
alloc1 := mock.Alloc()
args := structs.JobRegisterRequest{
Job: alloc1.Job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Directly manipulate the state
expectedDisplayMsg := "test message"
testEvent := structs.NewTaskEvent("test event").SetMessage(expectedDisplayMsg)
var events []*structs.TaskEvent
events = append(events, testEvent)
taskState := &structs.TaskState{Events: events}
alloc1.TaskStates = make(map[string]*structs.TaskState)
alloc1.TaskStates["test"] = taskState
state := s.Agent.server.State()
err := state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1})
if err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+alloc1.Job.ID+"/allocations?all=true", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
allocs := obj.([]*structs.AllocListStub)
if len(allocs) != 1 && allocs[0].ID != alloc1.ID {
t.Fatalf("bad: %v", allocs)
}
displayMsg := allocs[0].TaskStates["test"].Events[0].DisplayMessage
assert.Equal(t, expectedDisplayMsg, displayMsg)
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
})
}
func TestHTTP_JobDeployments(t *testing.T) {
assert := assert.New(t)
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
j := mock.Job()
args := structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister")
// Directly manipulate the state
state := s.Agent.server.State()
d := mock.Deployment()
d.JobID = j.ID
d.JobCreateIndex = resp.JobModifyIndex
assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment")
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+j.ID+"/deployments", nil)
assert.Nil(err, "HTTP")
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
assert.Nil(err, "JobSpecificRequest")
// Check the response
deploys := obj.([]*structs.Deployment)
assert.Len(deploys, 1, "deployments")
assert.Equal(d.ID, deploys[0].ID, "deployment id")
assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index")
assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader")
assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact")
})
}
func TestHTTP_JobDeployment(t *testing.T) {
assert := assert.New(t)
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
j := mock.Job()
args := structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister")
// Directly manipulate the state
state := s.Agent.server.State()
d := mock.Deployment()
d.JobID = j.ID
d.JobCreateIndex = resp.JobModifyIndex
assert.Nil(state.UpsertDeployment(1000, d), "UpsertDeployment")
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+j.ID+"/deployment", nil)
assert.Nil(err, "HTTP")
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
assert.Nil(err, "JobSpecificRequest")
// Check the response
out := obj.(*structs.Deployment)
assert.NotNil(out, "deployment")
assert.Equal(d.ID, out.ID, "deployment id")
assert.NotZero(respW.HeaderMap.Get("X-Nomad-Index"), "missing index")
assert.Equal("true", respW.HeaderMap.Get("X-Nomad-KnownLeader"), "missing known leader")
assert.NotZero(respW.HeaderMap.Get("X-Nomad-LastContact"), "missing last contact")
})
}
func TestHTTP_JobVersions(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
job2 := mock.Job()
job2.ID = job.ID
job2.Priority = 100
args2 := structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp2 structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args2, &resp2); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/versions?diffs=true", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
vResp := obj.(structs.JobVersionsResponse)
versions := vResp.Versions
if len(versions) != 2 {
t.Fatalf("got %d versions; want 2", len(versions))
}
if v := versions[0]; v.Version != 1 || v.Priority != 100 {
t.Fatalf("bad %v", v)
}
if v := versions[1]; v.Version != 0 {
t.Fatalf("bad %v", v)
}
if len(vResp.Diffs) != 1 {
t.Fatalf("bad %v", vResp)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
if respW.HeaderMap.Get("X-Nomad-KnownLeader") != "true" {
t.Fatalf("missing known leader")
}
if respW.HeaderMap.Get("X-Nomad-LastContact") == "" {
t.Fatalf("missing last contact")
}
})
}
func TestHTTP_PeriodicForce(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create and register a periodic job.
job := mock.PeriodicJob()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the HTTP request
req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/periodic/force", nil)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
// Check the response
r := obj.(structs.PeriodicForceResponse)
if r.EvalID == "" {
t.Fatalf("bad: %#v", r)
}
})
}
func TestHTTP_JobPlan(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := MockJob()
args := api.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: api.WriteRequest{
Region: "global",
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/plan", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
plan := obj.(structs.JobPlanResponse)
if plan.Annotations == nil {
t.Fatalf("bad: %v", plan)
}
if plan.Diff == nil {
t.Fatalf("bad: %v", plan)
}
})
}
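// TestHTTP_JobPlanRegion asserts how the region used for a job plan is
// resolved from the API request region, the jobspec region, and the agent's
// configured region.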
func TestHTTP_JobPlanRegion(t *testing.T) {
t.Parallel()
cases := []struct {
Name string
ConfigRegion string
APIRegion string
ExpectedRegion string
}{
{
Name: "api region takes precedence",
ConfigRegion: "not-global",
APIRegion: "north-america",
ExpectedRegion: "north-america",
},
{
Name: "config region is set",
ConfigRegion: "north-america",
APIRegion: "",
ExpectedRegion: "north-america",
},
{
Name: "api region is set",
ConfigRegion: "",
APIRegion: "north-america",
ExpectedRegion: "north-america",
},
{
Name: "falls back to default if no region is provided",
ConfigRegion: "",
APIRegion: "",
ExpectedRegion: "global",
},
}
for _, tc := range cases {
t.Run(tc.Name, func(t *testing.T) {
httpTest(t, func(c *Config) { c.Region = tc.ExpectedRegion }, func(s *TestAgent) {
// Create the job
job := MockRegionalJob()
if tc.ConfigRegion == "" {
job.Region = nil
} else {
job.Region = &tc.ConfigRegion
}
args := api.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: api.WriteRequest{
Region: tc.APIRegion,
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/plan", buf)
require.NoError(t, err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
require.NoError(t, err)
// Check the response
plan := obj.(structs.JobPlanResponse)
require.NotNil(t, plan.Annotations)
require.NotNil(t, plan.Diff)
})
})
}
}
func TestHTTP_JobDispatch(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the parameterized job
job := mock.BatchJob()
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
t.Fatalf("err: %v", err)
}
// Make the request
respW := httptest.NewRecorder()
args2 := structs.JobDispatchRequest{
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
IdempotencyToken: "foo",
},
}
buf := encodeReq(args2)
// Make the HTTP request
req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW.Flush()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req2)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
dispatch := obj.(structs.JobDispatchResponse)
if dispatch.EvalID == "" {
t.Fatalf("bad: %v", dispatch)
}
if dispatch.DispatchedJobID == "" {
t.Fatalf("bad: %v", dispatch)
}
})
}
func TestHTTP_JobRevert(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job and register it twice
job := mock.Job()
regReq := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var regResp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
// Change the job to get a new version
job.Datacenters = append(job.Datacenters, "foo")
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
args := structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
2017-04-19 20:24:06 +00:00
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/revert", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
revertResp := obj.(structs.JobRegisterResponse)
if revertResp.EvalID == "" {
t.Fatalf("bad: %v", revertResp)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
})
}
func TestHTTP_JobStable(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job and register it twice
job := mock.Job()
regReq := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var regResp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
t.Fatalf("err: %v", err)
}
args := structs.JobStabilityRequest{
JobID: job.ID,
JobVersion: 0,
Stable: true,
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/stable", buf)
if err != nil {
t.Fatalf("err: %v", err)
}
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.JobSpecificRequest(respW, req)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the response
stableResp := obj.(structs.JobStabilityResponse)
if stableResp.Index == 0 {
t.Fatalf("bad: %v", stableResp)
}
// Check for the index
if respW.HeaderMap.Get("X-Nomad-Index") == "" {
t.Fatalf("missing index")
}
})
}
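// TestJobs_ParsingWriteRequest asserts how the region, namespace, and auth
// token of a write request are resolved from the jobspec, query parameters,
// and API request body.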
func TestJobs_ParsingWriteRequest(t *testing.T) {
t.Parallel()
// defaults
agentRegion := "agentRegion"
cases := []struct {
name string
jobRegion string
multiregion *api.Multiregion
queryRegion string
queryNamespace string
queryToken string
apiRegion string
apiNamespace string
apiToken string
expectedRequestRegion string
expectedJobRegion string
expectedToken string
expectedNamespace string
}{
{
name: "no region provided at all",
jobRegion: "",
multiregion: nil,
queryRegion: "",
expectedRequestRegion: agentRegion,
expectedJobRegion: agentRegion,
expectedToken: "",
expectedNamespace: "default",
},
{
name: "no region provided but multiregion safe",
jobRegion: "",
multiregion: &api.Multiregion{},
queryRegion: "",
expectedRequestRegion: agentRegion,
expectedJobRegion: api.GlobalRegion,
expectedToken: "",
expectedNamespace: "default",
},
{
name: "region flag provided",
jobRegion: "",
multiregion: nil,
queryRegion: "west",
expectedRequestRegion: "west",
expectedJobRegion: "west",
expectedToken: "",
expectedNamespace: "default",
},
{
name: "job region provided",
jobRegion: "west",
multiregion: nil,
queryRegion: "",
expectedRequestRegion: "west",
expectedJobRegion: "west",
expectedToken: "",
expectedNamespace: "default",
},
{
name: "job region overridden by region flag",
jobRegion: "west",
multiregion: nil,
queryRegion: "east",
expectedRequestRegion: "east",
expectedJobRegion: "east",
expectedToken: "",
expectedNamespace: "default",
},
{
name: "multiregion to valid region",
jobRegion: "",
multiregion: &api.Multiregion{Regions: []*api.MultiregionRegion{
{Name: "west"},
{Name: "east"},
}},
queryRegion: "east",
expectedRequestRegion: "east",
expectedJobRegion: api.GlobalRegion,
expectedToken: "",
expectedNamespace: "default",
},
{
name: "multiregion sent to wrong region",
jobRegion: "",
multiregion: &api.Multiregion{Regions: []*api.MultiregionRegion{
{Name: "west"},
{Name: "east"},
}},
queryRegion: "north",
expectedRequestRegion: "west",
expectedJobRegion: api.GlobalRegion,
expectedToken: "",
expectedNamespace: "default",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
// we need a valid agent config but we don't want to start up
// a real server for this
srv := &HTTPServer{}
srv.agent = &Agent{config: &Config{Region: agentRegion}}
job := &api.Job{
Region: helper.StringToPtr(tc.jobRegion),
Multiregion: tc.multiregion,
}
req, _ := http.NewRequest("POST", "/", nil)
if tc.queryToken != "" {
req.Header.Set("X-Nomad-Token", tc.queryToken)
}
q := req.URL.Query()
if tc.queryNamespace != "" {
q.Add("namespace", tc.queryNamespace)
}
if tc.queryRegion != "" {
q.Add("region", tc.queryRegion)
}
req.URL.RawQuery = q.Encode()
apiReq := api.WriteRequest{
Region: tc.apiRegion,
Namespace: tc.apiNamespace,
SecretID: tc.apiToken,
}
sJob, sWriteReq := srv.apiJobAndRequestToStructs(job, req, apiReq)
require.Equal(t, tc.expectedJobRegion, sJob.Region)
require.Equal(t, tc.expectedNamespace, sJob.Namespace)
require.Equal(t, tc.expectedNamespace, sWriteReq.Namespace)
require.Equal(t, tc.expectedRequestRegion, sWriteReq.Region)
require.Equal(t, tc.expectedToken, sWriteReq.AuthToken)
})
}
}
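// TestJobs_RegionForJob asserts how regionForJob resolves the request region
// and job region from the jobspec, query parameter, API body, and agent
// default.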
func TestJobs_RegionForJob(t *testing.T) {
t.Parallel()
// defaults
agentRegion := "agentRegion"
cases := []struct {
name string
jobRegion string
multiregion *api.Multiregion
queryRegion string
apiRegion string
agentRegion string
expectedRequestRegion string
expectedJobRegion string
}{
{
name: "no region provided",
jobRegion: "",
multiregion: nil,
queryRegion: "",
expectedRequestRegion: agentRegion,
expectedJobRegion: agentRegion,
},
{
name: "no region provided but multiregion safe",
jobRegion: "",
multiregion: &api.Multiregion{},
queryRegion: "",
expectedRequestRegion: agentRegion,
expectedJobRegion: api.GlobalRegion,
},
{
name: "region flag provided",
jobRegion: "",
multiregion: nil,
queryRegion: "west",
expectedRequestRegion: "west",
expectedJobRegion: "west",
},
{
name: "job region provided",
jobRegion: "west",
multiregion: nil,
queryRegion: "",
expectedRequestRegion: "west",
expectedJobRegion: "west",
},
{
name: "job region overridden by region flag",
jobRegion: "west",
multiregion: nil,
queryRegion: "east",
expectedRequestRegion: "east",
expectedJobRegion: "east",
},
{
name: "job region overridden by api body",
jobRegion: "west",
multiregion: nil,
apiRegion: "east",
expectedRequestRegion: "east",
expectedJobRegion: "east",
},
{
name: "multiregion to valid region",
jobRegion: "",
multiregion: &api.Multiregion{Regions: []*api.MultiregionRegion{
{Name: "west"},
{Name: "east"},
}},
queryRegion: "east",
expectedRequestRegion: "east",
expectedJobRegion: api.GlobalRegion,
},
{
name: "multiregion sent to wrong region",
jobRegion: "",
multiregion: &api.Multiregion{Regions: []*api.MultiregionRegion{
{Name: "west"},
{Name: "east"},
}},
queryRegion: "north",
expectedRequestRegion: "west",
expectedJobRegion: api.GlobalRegion,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
job := &api.Job{
Region: helper.StringToPtr(tc.jobRegion),
Multiregion: tc.multiregion,
}
requestRegion, jobRegion := regionForJob(
job, tc.queryRegion, tc.apiRegion, agentRegion)
require.Equal(t, tc.expectedRequestRegion, requestRegion)
require.Equal(t, tc.expectedJobRegion, jobRegion)
})
}
}
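// TestJobs_NamespaceForJob asserts how namespaceForJob resolves the namespace
// from the jobspec, query parameter, and API body.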
func TestJobs_NamespaceForJob(t *testing.T) {
t.Parallel()
// test namespace for pointer inputs
ns := "dev"
cases := []struct {
name string
job *api.Job
queryNamespace string
apiNamespace string
expected string
}{
{
name: "no namespace provided",
job: &api.Job{},
expected: structs.DefaultNamespace,
},
{
name: "jobspec has namespace",
job: &api.Job{Namespace: &ns},
expected: "dev",
},
{
name: "-namespace flag overrides empty job namespace",
job: &api.Job{},
queryNamespace: "prod",
expected: "prod",
},
{
name: "-namespace flag overrides job namespace",
job: &api.Job{Namespace: &ns},
queryNamespace: "prod",
expected: "prod",
},
{
name: "-namespace flag overrides job namespace even if default",
job: &api.Job{Namespace: &ns},
queryNamespace: structs.DefaultNamespace,
expected: structs.DefaultNamespace,
},
{
name: "API param overrides empty job namespace",
job: &api.Job{},
apiNamespace: "prod",
expected: "prod",
},
{
name: "-namespace flag overrides API param",
job: &api.Job{Namespace: &ns},
queryNamespace: "prod",
apiNamespace: "whatever",
expected: "prod",
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expected,
namespaceForJob(tc.job.Namespace, tc.queryNamespace, tc.apiNamespace),
)
})
}
}
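// TestJobs_ApiJobToStructsJob builds a fully populated api.Job and asserts
// that converting it to a structs.Job preserves every field.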
func TestJobs_ApiJobToStructsJob(t *testing.T) {
apiJob := &api.Job{
Stop: helper.BoolToPtr(true),
Region: helper.StringToPtr("global"),
Namespace: helper.StringToPtr("foo"),
ID: helper.StringToPtr("foo"),
ParentID: helper.StringToPtr("lol"),
Name: helper.StringToPtr("name"),
Type: helper.StringToPtr("service"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(true),
Datacenters: []string{"dc1", "dc2"},
Constraints: []*api.Constraint{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
},
},
Affinities: []*api.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: helper.Int8ToPtr(50),
},
},
Update: &api.UpdateStrategy{
Stagger: helper.TimeToPtr(1 * time.Second),
MaxParallel: helper.IntToPtr(5),
HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual),
MinHealthyTime: helper.TimeToPtr(1 * time.Minute),
HealthyDeadline: helper.TimeToPtr(3 * time.Minute),
ProgressDeadline: helper.TimeToPtr(3 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
Canary: helper.IntToPtr(1),
},
Spreads: []*api.Spread{
{
Attribute: "${meta.rack}",
Weight: helper.Int8ToPtr(100),
SpreadTarget: []*api.SpreadTarget{
{
Value: "r1",
Percent: 50,
},
},
},
},
Periodic: &api.PeriodicConfig{
Enabled: helper.BoolToPtr(true),
Spec: helper.StringToPtr("spec"),
SpecType: helper.StringToPtr("cron"),
ProhibitOverlap: helper.BoolToPtr(true),
TimeZone: helper.StringToPtr("test zone"),
},
ParameterizedJob: &api.ParameterizedJobConfig{
Payload: "payload",
MetaRequired: []string{"a", "b"},
MetaOptional: []string{"c", "d"},
},
Payload: []byte("payload"),
Meta: map[string]string{
"foo": "bar",
},
Multiregion: &api.Multiregion{
Strategy: &api.MultiregionStrategy{
MaxParallel: helper.IntToPtr(2),
OnFailure: helper.StringToPtr("fail_all"),
},
Regions: []*api.MultiregionRegion{
{
Name: "west",
Count: helper.IntToPtr(1),
Datacenters: []string{"dc1", "dc2"},
Meta: map[string]string{"region_code": "W"},
},
},
},
TaskGroups: []*api.TaskGroup{
{
Name: helper.StringToPtr("group1"),
Count: helper.IntToPtr(5),
Constraints: []*api.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*api.Affinity{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
Weight: helper.Int8ToPtr(100),
},
},
RestartPolicy: &api.RestartPolicy{
Interval: helper.TimeToPtr(1 * time.Second),
Attempts: helper.IntToPtr(5),
Delay: helper.TimeToPtr(10 * time.Second),
Mode: helper.StringToPtr("delay"),
},
ReschedulePolicy: &api.ReschedulePolicy{
Interval: helper.TimeToPtr(12 * time.Hour),
Attempts: helper.IntToPtr(5),
DelayFunction: helper.StringToPtr("constant"),
Delay: helper.TimeToPtr(30 * time.Second),
Unlimited: helper.BoolToPtr(true),
MaxDelay: helper.TimeToPtr(20 * time.Minute),
},
Migrate: &api.MigrateStrategy{
MaxParallel: helper.IntToPtr(12),
HealthCheck: helper.StringToPtr("task_events"),
MinHealthyTime: helper.TimeToPtr(12 * time.Hour),
HealthyDeadline: helper.TimeToPtr(12 * time.Hour),
},
Spreads: []*api.Spread{
{
Attribute: "${node.datacenter}",
Weight: helper.Int8ToPtr(100),
SpreadTarget: []*api.SpreadTarget{
{
Value: "dc1",
Percent: 100,
},
},
},
},
EphemeralDisk: &api.EphemeralDisk{
SizeMB: helper.IntToPtr(100),
Sticky: helper.BoolToPtr(true),
Migrate: helper.BoolToPtr(true),
},
Update: &api.UpdateStrategy{
HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Checks),
MinHealthyTime: helper.TimeToPtr(2 * time.Minute),
HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
ProgressDeadline: helper.TimeToPtr(5 * time.Minute),
AutoRevert: helper.BoolToPtr(true),
},
Meta: map[string]string{
"key": "value",
},
Consul: &api.Consul{
Namespace: "team-foo",
},
Services: []*api.Service{
{
Name: "groupserviceA",
Tags: []string{"a", "b"},
CanaryTags: []string{"d", "e"},
EnableTagOverride: true,
PortLabel: "1234",
Meta: map[string]string{
"servicemeta": "foobar",
},
CheckRestart: &api.CheckRestart{
Limit: 4,
Grace: helper.TimeToPtr(11 * time.Second),
},
Checks: []api.ServiceCheck{
{
Id: "hello",
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
Method: "POST",
Body: "{\"check\":\"mem\"}",
PortLabel: "foo",
AddressMode: "driver",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
CheckRestart: &api.CheckRestart{
Limit: 3,
IgnoreWarnings: true,
},
TaskName: "task1",
SuccessBeforePassing: 2,
FailuresBeforeCritical: 3,
},
},
Connect: &api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{
Tags: []string{"f", "g"},
Port: "9000",
DisableDefaultTCPCheck: true,
},
},
},
},
2017-02-13 23:18:17 +00:00
Tasks: []*api.Task{
{
Name: "task1",
Leader: true,
Driver: "docker",
User: "mary",
Config: map[string]interface{}{
"lol": "code",
},
Env: map[string]string{
"hello": "world",
},
Constraints: []*api.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*api.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: helper.Int8ToPtr(50),
},
},
VolumeMounts: []*api.VolumeMount{
{
Volume: helper.StringToPtr("vol"),
Destination: helper.StringToPtr("dest"),
ReadOnly: helper.BoolToPtr(false),
PropagationMode: helper.StringToPtr("a"),
},
},
RestartPolicy: &api.RestartPolicy{
Interval: helper.TimeToPtr(2 * time.Second),
Attempts: helper.IntToPtr(10),
Delay: helper.TimeToPtr(20 * time.Second),
Mode: helper.StringToPtr("delay"),
},
Services: []*api.Service{
{
Id: "id",
Name: "serviceA",
Tags: []string{"1", "2"},
CanaryTags: []string{"3", "4"},
EnableTagOverride: true,
PortLabel: "foo",
Meta: map[string]string{
"servicemeta": "foobar",
},
CheckRestart: &api.CheckRestart{
Limit: 4,
Grace: helper.TimeToPtr(11 * time.Second),
},
Checks: []api.ServiceCheck{
{
Id: "hello",
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
PortLabel: "foo",
AddressMode: "driver",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
SuccessBeforePassing: 3,
FailuresBeforeCritical: 4,
CheckRestart: &api.CheckRestart{
Limit: 3,
IgnoreWarnings: true,
},
},
{
Id: "check2id",
Name: "check2",
Type: "tcp",
PortLabel: "foo",
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
},
},
},
},
Resources: &api.Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(10),
Networks: []*api.NetworkResource{
{
IP: "10.10.11.1",
MBits: helper.IntToPtr(10),
Hostname: "foobar",
ReservedPorts: []api.Port{
{
Label: "http",
Value: 80,
},
},
DynamicPorts: []api.Port{
{
Label: "ssh",
Value: 2000,
},
},
},
},
Devices: []*api.RequestedDevice{
{
Name: "nvidia/gpu",
Count: helper.Uint64ToPtr(4),
Constraints: []*api.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*api.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: helper.Int8ToPtr(50),
},
},
},
{
Name: "gpu",
Count: nil,
},
},
},
Meta: map[string]string{
"lol": "code",
},
KillTimeout: helper.TimeToPtr(10 * time.Second),
KillSignal: "SIGQUIT",
LogConfig: &api.LogConfig{
MaxFiles: helper.IntToPtr(10),
MaxFileSizeMB: helper.IntToPtr(100),
},
Artifacts: []*api.TaskArtifact{
{
GetterSource: helper.StringToPtr("source"),
GetterOptions: map[string]string{
"a": "b",
},
GetterMode: helper.StringToPtr("dir"),
RelativeDest: helper.StringToPtr("dest"),
},
},
Vault: &api.Vault{
Namespace: helper.StringToPtr("ns1"),
Policies: []string{"a", "b", "c"},
Env: helper.BoolToPtr(true),
ChangeMode: helper.StringToPtr("c"),
ChangeSignal: helper.StringToPtr("sighup"),
},
Templates: []*api.Template{
{
SourcePath: helper.StringToPtr("source"),
DestPath: helper.StringToPtr("dest"),
EmbeddedTmpl: helper.StringToPtr("embedded"),
ChangeMode: helper.StringToPtr("change"),
ChangeSignal: helper.StringToPtr("signal"),
Splay: helper.TimeToPtr(1 * time.Minute),
Perms: helper.StringToPtr("666"),
LeftDelim: helper.StringToPtr("abc"),
RightDelim: helper.StringToPtr("def"),
Envvars: helper.BoolToPtr(true),
},
},
DispatchPayload: &api.DispatchPayloadConfig{
File: "fileA",
},
},
},
},
},
ConsulToken: helper.StringToPtr("abc123"),
VaultToken: helper.StringToPtr("def456"),
VaultNamespace: helper.StringToPtr("ghi789"),
Status: helper.StringToPtr("status"),
StatusDescription: helper.StringToPtr("status_desc"),
Version: helper.Uint64ToPtr(10),
CreateIndex: helper.Uint64ToPtr(1),
ModifyIndex: helper.Uint64ToPtr(3),
JobModifyIndex: helper.Uint64ToPtr(5),
}
expected := &structs.Job{
Stop: true,
Region: "global",
Namespace: "foo",
VaultNamespace: "ghi789",
ID: "foo",
Name: "name",
Type: "service",
Priority: 50,
AllAtOnce: true,
Datacenters: []string{"dc1", "dc2"},
Constraints: []*structs.Constraint{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
},
},
Affinities: []*structs.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: 50,
},
},
Spreads: []*structs.Spread{
{
Attribute: "${meta.rack}",
Weight: 100,
SpreadTarget: []*structs.SpreadTarget{
{
Value: "r1",
Percent: 50,
},
},
},
},
Update: structs.UpdateStrategy{
Stagger: 1 * time.Second,
MaxParallel: 5,
},
Periodic: &structs.PeriodicConfig{
Enabled: true,
Spec: "spec",
SpecType: "cron",
ProhibitOverlap: true,
TimeZone: "test zone",
},
ParameterizedJob: &structs.ParameterizedJobConfig{
Payload: "payload",
MetaRequired: []string{"a", "b"},
MetaOptional: []string{"c", "d"},
},
Payload: []byte("payload"),
Meta: map[string]string{
"foo": "bar",
},
Multiregion: &structs.Multiregion{
Strategy: &structs.MultiregionStrategy{
MaxParallel: 2,
OnFailure: "fail_all",
},
Regions: []*structs.MultiregionRegion{
{
Name: "west",
Count: 1,
Datacenters: []string{"dc1", "dc2"},
Meta: map[string]string{"region_code": "W"},
},
},
},
TaskGroups: []*structs.TaskGroup{
{
Name: "group1",
Count: 5,
Constraints: []*structs.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*structs.Affinity{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
Weight: 100,
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
},
Spreads: []*structs.Spread{
{
Attribute: "${node.datacenter}",
Weight: 100,
SpreadTarget: []*structs.SpreadTarget{
{
Value: "dc1",
Percent: 100,
},
},
},
},
ReschedulePolicy: &structs.ReschedulePolicy{
Interval: 12 * time.Hour,
Attempts: 5,
DelayFunction: "constant",
Delay: 30 * time.Second,
Unlimited: true,
MaxDelay: 20 * time.Minute,
},
Migrate: &structs.MigrateStrategy{
MaxParallel: 12,
HealthCheck: "task_events",
MinHealthyTime: 12 * time.Hour,
HealthyDeadline: 12 * time.Hour,
},
EphemeralDisk: &structs.EphemeralDisk{
SizeMB: 100,
Sticky: true,
Migrate: true,
},
Update: &structs.UpdateStrategy{
Stagger: 1 * time.Second,
MaxParallel: 5,
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
MinHealthyTime: 2 * time.Minute,
HealthyDeadline: 5 * time.Minute,
ProgressDeadline: 5 * time.Minute,
AutoRevert: true,
AutoPromote: false,
Canary: 1,
},
Meta: map[string]string{
"key": "value",
},
Consul: &structs.Consul{
Namespace: "team-foo",
},
Services: []*structs.Service{
{
Name: "groupserviceA",
Tags: []string{"a", "b"},
CanaryTags: []string{"d", "e"},
EnableTagOverride: true,
PortLabel: "1234",
AddressMode: "auto",
Meta: map[string]string{
"servicemeta": "foobar",
},
OnUpdate: "require_healthy",
Checks: []*structs.ServiceCheck{
{
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
Method: "POST",
Body: "{\"check\":\"mem\"}",
PortLabel: "foo",
AddressMode: "driver",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
CheckRestart: &structs.CheckRestart{
Grace: 11 * time.Second,
Limit: 3,
IgnoreWarnings: true,
},
TaskName: "task1",
OnUpdate: "require_healthy",
SuccessBeforePassing: 2,
FailuresBeforeCritical: 3,
},
},
Connect: &structs.ConsulConnect{
Native: false,
SidecarService: &structs.ConsulSidecarService{
Tags: []string{"f", "g"},
Port: "9000",
DisableDefaultTCPCheck: true,
},
},
},
},
Tasks: []*structs.Task{
{
Name: "task1",
Driver: "docker",
Leader: true,
User: "mary",
Config: map[string]interface{}{
"lol": "code",
},
Constraints: []*structs.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*structs.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: 50,
},
},
2017-02-13 23:18:17 +00:00
Env: map[string]string{
"hello": "world",
},
VolumeMounts: []*structs.VolumeMount{
{
Volume: "vol",
Destination: "dest",
ReadOnly: false,
PropagationMode: "a",
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 2 * time.Second,
Attempts: 10,
Delay: 20 * time.Second,
Mode: "delay",
},
Services: []*structs.Service{
{
Name: "serviceA",
Tags: []string{"1", "2"},
CanaryTags: []string{"3", "4"},
EnableTagOverride: true,
PortLabel: "foo",
AddressMode: "auto",
Meta: map[string]string{
"servicemeta": "foobar",
},
OnUpdate: "require_healthy",
Checks: []*structs.ServiceCheck{
{
Name: "bar",
Type: "http",
Command: "foo",
Args: []string{"a", "b"},
Path: "/check",
Protocol: "http",
PortLabel: "foo",
AddressMode: "driver",
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
InitialStatus: "ok",
GRPCService: "foo.Bar",
GRPCUseTLS: true,
SuccessBeforePassing: 3,
FailuresBeforeCritical: 4,
CheckRestart: &structs.CheckRestart{
Limit: 3,
Grace: 11 * time.Second,
IgnoreWarnings: true,
},
OnUpdate: "require_healthy",
},
{
Name: "check2",
Type: "tcp",
PortLabel: "foo",
Interval: 4 * time.Second,
Timeout: 2 * time.Second,
CheckRestart: &structs.CheckRestart{
Limit: 4,
Grace: 11 * time.Second,
},
OnUpdate: "require_healthy",
},
},
},
},
Resources: &structs.Resources{
CPU: 100,
MemoryMB: 10,
Networks: []*structs.NetworkResource{
{
IP: "10.10.11.1",
MBits: 10,
Hostname: "foobar",
ReservedPorts: []structs.Port{
{
Label: "http",
Value: 80,
},
},
DynamicPorts: []structs.Port{
{
Label: "ssh",
Value: 2000,
},
},
},
},
Devices: []*structs.RequestedDevice{
{
Name: "nvidia/gpu",
Count: 4,
Constraints: []*structs.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Affinities: []*structs.Affinity{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
Weight: 50,
},
},
},
{
Name: "gpu",
Count: 1,
},
},
},
Meta: map[string]string{
"lol": "code",
},
KillTimeout: 10 * time.Second,
KillSignal: "SIGQUIT",
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 100,
},
Artifacts: []*structs.TaskArtifact{
{
GetterSource: "source",
GetterOptions: map[string]string{
"a": "b",
},
GetterMode: "dir",
RelativeDest: "dest",
},
},
Vault: &structs.Vault{
Namespace: "ns1",
Policies: []string{"a", "b", "c"},
Env: true,
ChangeMode: "c",
ChangeSignal: "sighup",
},
Templates: []*structs.Template{
{
SourcePath: "source",
DestPath: "dest",
EmbeddedTmpl: "embedded",
ChangeMode: "change",
ChangeSignal: "SIGNAL",
Splay: 1 * time.Minute,
Perms: "666",
LeftDelim: "abc",
RightDelim: "def",
Envvars: true,
},
},
DispatchPayload: &structs.DispatchPayloadConfig{
File: "fileA",
},
},
},
},
},
ConsulToken: "abc123",
VaultToken: "def456",
}
structsJob := ApiJobToStructJob(apiJob)
require.Equal(t, expected, structsJob)
systemAPIJob := &api.Job{
Stop: helper.BoolToPtr(true),
Region: helper.StringToPtr("global"),
Namespace: helper.StringToPtr("foo"),
ID: helper.StringToPtr("foo"),
ParentID: helper.StringToPtr("lol"),
Name: helper.StringToPtr("name"),
Type: helper.StringToPtr("system"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(true),
Datacenters: []string{"dc1", "dc2"},
Constraints: []*api.Constraint{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
},
},
TaskGroups: []*api.TaskGroup{
{
Name: helper.StringToPtr("group1"),
Count: helper.IntToPtr(5),
Constraints: []*api.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
RestartPolicy: &api.RestartPolicy{
Interval: helper.TimeToPtr(1 * time.Second),
Attempts: helper.IntToPtr(5),
Delay: helper.TimeToPtr(10 * time.Second),
Mode: helper.StringToPtr("delay"),
},
EphemeralDisk: &api.EphemeralDisk{
SizeMB: helper.IntToPtr(100),
Sticky: helper.BoolToPtr(true),
Migrate: helper.BoolToPtr(true),
},
Meta: map[string]string{
"key": "value",
},
Consul: &api.Consul{
Namespace: "foo",
},
Tasks: []*api.Task{
{
Name: "task1",
Leader: true,
Driver: "docker",
User: "mary",
Config: map[string]interface{}{
"lol": "code",
},
Env: map[string]string{
"hello": "world",
},
Constraints: []*api.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Resources: &api.Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(10),
Networks: []*api.NetworkResource{
{
IP: "10.10.11.1",
MBits: helper.IntToPtr(10),
ReservedPorts: []api.Port{
{
Label: "http",
Value: 80,
},
},
DynamicPorts: []api.Port{
{
Label: "ssh",
Value: 2000,
},
},
},
},
},
Meta: map[string]string{
"lol": "code",
},
KillTimeout: helper.TimeToPtr(10 * time.Second),
KillSignal: "SIGQUIT",
LogConfig: &api.LogConfig{
MaxFiles: helper.IntToPtr(10),
MaxFileSizeMB: helper.IntToPtr(100),
},
Artifacts: []*api.TaskArtifact{
{
GetterSource: helper.StringToPtr("source"),
GetterOptions: map[string]string{"a": "b"},
GetterHeaders: map[string]string{"User-Agent": "nomad"},
GetterMode: helper.StringToPtr("dir"),
RelativeDest: helper.StringToPtr("dest"),
},
},
DispatchPayload: &api.DispatchPayloadConfig{
File: "fileA",
},
},
},
},
},
Status: helper.StringToPtr("status"),
StatusDescription: helper.StringToPtr("status_desc"),
Version: helper.Uint64ToPtr(10),
CreateIndex: helper.Uint64ToPtr(1),
ModifyIndex: helper.Uint64ToPtr(3),
JobModifyIndex: helper.Uint64ToPtr(5),
}
expectedSystemJob := &structs.Job{
Stop: true,
Region: "global",
Namespace: "foo",
ID: "foo",
Name: "name",
Type: "system",
Priority: 50,
AllAtOnce: true,
Datacenters: []string{"dc1", "dc2"},
Constraints: []*structs.Constraint{
{
LTarget: "a",
RTarget: "b",
Operand: "c",
},
},
TaskGroups: []*structs.TaskGroup{
{
Name: "group1",
Count: 5,
Constraints: []*structs.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
},
EphemeralDisk: &structs.EphemeralDisk{
SizeMB: 100,
Sticky: true,
Migrate: true,
},
Meta: map[string]string{
"key": "value",
},
Consul: &structs.Consul{
Namespace: "foo",
},
Tasks: []*structs.Task{
{
Name: "task1",
Driver: "docker",
Leader: true,
User: "mary",
Config: map[string]interface{}{
"lol": "code",
},
Constraints: []*structs.Constraint{
{
LTarget: "x",
RTarget: "y",
Operand: "z",
},
},
Env: map[string]string{
"hello": "world",
},
Resources: &structs.Resources{
CPU: 100,
MemoryMB: 10,
Networks: []*structs.NetworkResource{
{
IP: "10.10.11.1",
MBits: 10,
ReservedPorts: []structs.Port{
{
Label: "http",
Value: 80,
},
},
DynamicPorts: []structs.Port{
{
Label: "ssh",
Value: 2000,
},
},
},
},
},
RestartPolicy: &structs.RestartPolicy{
Interval: 1 * time.Second,
Attempts: 5,
Delay: 10 * time.Second,
Mode: "delay",
},
Meta: map[string]string{
"lol": "code",
},
KillTimeout: 10 * time.Second,
KillSignal: "SIGQUIT",
LogConfig: &structs.LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 100,
},
Artifacts: []*structs.TaskArtifact{
{
GetterSource: "source",
GetterOptions: map[string]string{"a": "b"},
GetterHeaders: map[string]string{"User-Agent": "nomad"},
GetterMode: "dir",
RelativeDest: "dest",
},
},
DispatchPayload: &structs.DispatchPayloadConfig{
File: "fileA",
},
},
},
},
},
}
systemStructsJob := ApiJobToStructJob(systemAPIJob)
require.Equal(t, expectedSystemJob, systemStructsJob)
}
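// TestJobs_ApiJobToStructsJobUpdate asserts that a job-level Update stanza is
// merged down into each task group's Update strategy (with group overrides
// winning), while the job-level structs.UpdateStrategy retains only Stagger
// and MaxParallel.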
func TestJobs_ApiJobToStructsJobUpdate(t *testing.T) {
apiJob := &api.Job{
Update: &api.UpdateStrategy{
Stagger: helper.TimeToPtr(1 * time.Second),
MaxParallel: helper.IntToPtr(5),
HealthCheck: helper.StringToPtr(structs.UpdateStrategyHealthCheck_Manual),
MinHealthyTime: helper.TimeToPtr(1 * time.Minute),
HealthyDeadline: helper.TimeToPtr(3 * time.Minute),
ProgressDeadline: helper.TimeToPtr(3 * time.Minute),
AutoRevert: helper.BoolToPtr(false),
AutoPromote: nil,
Canary: helper.IntToPtr(1),
},
TaskGroups: []*api.TaskGroup{
{
Update: &api.UpdateStrategy{
Canary: helper.IntToPtr(2),
AutoRevert: helper.BoolToPtr(true),
},
}, {
Update: &api.UpdateStrategy{
Canary: helper.IntToPtr(3),
AutoPromote: helper.BoolToPtr(true),
},
},
},
}
structsJob := ApiJobToStructJob(apiJob)
// Update has been moved from job down to the groups
jobUpdate := structs.UpdateStrategy{
Stagger: 1000000000,
MaxParallel: 5,
HealthCheck: "",
MinHealthyTime: 0,
HealthyDeadline: 0,
ProgressDeadline: 0,
AutoRevert: false,
AutoPromote: false,
Canary: 0,
}
// But the groups inherit settings from the job update
group1 := structs.UpdateStrategy{
Stagger: 1000000000,
MaxParallel: 5,
HealthCheck: "manual",
MinHealthyTime: 60000000000,
HealthyDeadline: 180000000000,
ProgressDeadline: 180000000000,
AutoRevert: true,
AutoPromote: false,
Canary: 2,
}
group2 := structs.UpdateStrategy{
Stagger: 1000000000,
MaxParallel: 5,
HealthCheck: "manual",
MinHealthyTime: 60000000000,
HealthyDeadline: 180000000000,
ProgressDeadline: 180000000000,
AutoRevert: false,
AutoPromote: true,
Canary: 3,
}
require.Equal(t, jobUpdate, structsJob.Update)
require.Equal(t, group1, *structsJob.TaskGroups[0].Update)
require.Equal(t, group2, *structsJob.TaskGroups[1].Update)
}
// TestJobs_Matching_Resources asserts:
// api.{Default,Min}Resources == structs.{Default,Min}Resources
//
// While this is an odd place to test that, this is where both are imported,
// validated, and converted.
func TestJobs_Matching_Resources(t *testing.T) {
t.Parallel()
// api.MinResources == structs.MinResources
structsMinRes := ApiResourcesToStructs(api.MinResources())
assert.Equal(t, structs.MinResources(), structsMinRes)
// api.DefaultResources == structs.DefaultResources
structsDefaultRes := ApiResourcesToStructs(api.DefaultResources())
assert.Equal(t, structs.DefaultResources(), structsDefaultRes)
}
// TestHTTP_JobValidate_SystemMigrate asserts that a system job with a migrate
// stanza fails to validate but does not panic (see #5477).
func TestHTTP_JobValidate_SystemMigrate(t *testing.T) {
t.Parallel()
httpTest(t, nil, func(s *TestAgent) {
// Create the job
job := &api.Job{
Region: helper.StringToPtr("global"),
Datacenters: []string{"dc1"},
ID: helper.StringToPtr("systemmigrate"),
Name: helper.StringToPtr("systemmigrate"),
TaskGroups: []*api.TaskGroup{
{Name: helper.StringToPtr("web")},
},
// System job...
Type: helper.StringToPtr("system"),
// ...with an empty migrate stanza
Migrate: &api.MigrateStrategy{},
}
args := api.JobValidateRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
}
buf := encodeReq(args)
// Make the HTTP request
req, err := http.NewRequest("PUT", "/v1/validate/job", buf)
require.NoError(t, err)
respW := httptest.NewRecorder()
// Make the request
obj, err := s.Server.ValidateJobRequest(respW, req)
require.NoError(t, err)
// Check the response
resp := obj.(structs.JobValidateResponse)
require.Contains(t, resp.Error, `Job type "system" does not allow migrate block`)
})
}
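// TestConversion_dereferenceInt asserts that dereferenceInt treats a nil
// pointer as zero and otherwise returns the pointed-to value.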
func TestConversion_dereferenceInt(t *testing.T) {
t.Parallel()
require.Equal(t, 0, dereferenceInt(nil))
require.Equal(t, 42, dereferenceInt(helper.IntToPtr(42)))
}
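// TestConversion_apiLogConfigToStructs asserts the nil-safe conversion of an
// api.LogConfig into a structs.LogConfig.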
func TestConversion_apiLogConfigToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiLogConfigToStructs(nil))
require.Equal(t, &structs.LogConfig{
MaxFiles: 2,
MaxFileSizeMB: 8,
}, apiLogConfigToStructs(&api.LogConfig{
MaxFiles: helper.IntToPtr(2),
MaxFileSizeMB: helper.IntToPtr(8),
}))
}
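// TestConversion_apiResourcesToStructs exercises ApiResourcesToStructs over
// nil input, plain CPU/memory values, and MemoryMaxMB.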
func TestConversion_apiResourcesToStructs(t *testing.T) {
t.Parallel()
cases := []struct {
name string
input *api.Resources
expected *structs.Resources
}{
{
"nil",
nil,
nil,
},
{
"plain",
&api.Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(200),
},
&structs.Resources{
CPU: 100,
MemoryMB: 200,
},
},
{
"with memory max",
&api.Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(200),
MemoryMaxMB: helper.IntToPtr(300),
},
&structs.Resources{
CPU: 100,
MemoryMB: 200,
MemoryMaxMB: 300,
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
found := ApiResourcesToStructs(c.input)
require.Equal(t, c.expected, found)
})
}
}
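// TestConversion_apiConnectSidecarTaskToStructs asserts the field-by-field
// conversion of an api.SidecarTask into a structs.SidecarTask, including the
// nil case.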
func TestConversion_apiConnectSidecarTaskToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiConnectSidecarTaskToStructs(nil))
delay := time.Duration(200)
timeout := time.Duration(1000)
config := make(map[string]interface{})
env := make(map[string]string)
meta := make(map[string]string)
require.Equal(t, &structs.SidecarTask{
Name: "name",
Driver: "driver",
User: "user",
Config: config,
Env: env,
Resources: &structs.Resources{
CPU: 1,
MemoryMB: 128,
},
Meta: meta,
KillTimeout: &timeout,
LogConfig: &structs.LogConfig{
MaxFiles: 2,
MaxFileSizeMB: 8,
},
ShutdownDelay: &delay,
KillSignal: "SIGTERM",
}, apiConnectSidecarTaskToStructs(&api.SidecarTask{
Name: "name",
Driver: "driver",
User: "user",
Config: config,
Env: env,
Resources: &api.Resources{
CPU: helper.IntToPtr(1),
MemoryMB: helper.IntToPtr(128),
},
Meta: meta,
KillTimeout: &timeout,
LogConfig: &api.LogConfig{
MaxFiles: helper.IntToPtr(2),
MaxFileSizeMB: helper.IntToPtr(8),
},
ShutdownDelay: &delay,
KillSignal: "SIGTERM",
}))
}
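// TestConversion_apiConsulExposePathsToStructs asserts that nil and empty
// slices convert to nil, and that populated expose paths are copied through.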
func TestConversion_apiConsulExposePathsToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiConsulExposePathsToStructs(nil))
require.Nil(t, apiConsulExposePathsToStructs(make([]*api.ConsulExposePath, 0)))
require.Equal(t, []structs.ConsulExposePath{{
Path: "/health",
Protocol: "http",
LocalPathPort: 8080,
ListenerPort: "hcPort",
}}, apiConsulExposePathsToStructs([]*api.ConsulExposePath{{
Path: "/health",
Protocol: "http",
LocalPathPort: 8080,
ListenerPort: "hcPort",
}}))
}
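// TestConversion_apiConsulExposeConfigToStructs asserts the nil-safe
// conversion of an api.ConsulExposeConfig into a structs.ConsulExposeConfig.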
func TestConversion_apiConsulExposeConfigToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiConsulExposeConfigToStructs(nil))
require.Equal(t, &structs.ConsulExposeConfig{
Paths: []structs.ConsulExposePath{{Path: "/health"}},
}, apiConsulExposeConfigToStructs(&api.ConsulExposeConfig{
Path: []*api.ConsulExposePath{{Path: "/health"}},
}))
}
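// TestConversion_apiUpstreamsToStructs asserts that Connect upstreams,
// including the mesh gateway mode, convert from api to structs types, and
// that nil or empty input converts to nil.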
func TestConversion_apiUpstreamsToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiUpstreamsToStructs(nil))
require.Nil(t, apiUpstreamsToStructs(make([]*api.ConsulUpstream, 0)))
require.Equal(t, []structs.ConsulUpstream{{
DestinationName: "upstream",
LocalBindPort: 8000,
Datacenter: "dc2",
LocalBindAddress: "127.0.0.2",
MeshGateway: &structs.ConsulMeshGateway{Mode: "local"},
}}, apiUpstreamsToStructs([]*api.ConsulUpstream{{
DestinationName: "upstream",
LocalBindPort: 8000,
Datacenter: "dc2",
LocalBindAddress: "127.0.0.2",
MeshGateway: &api.ConsulMeshGateway{Mode: "local"},
}}))
}
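// TestConversion_apiConsulMeshGatewayToStructs asserts the nil-safe
// conversion of an api.ConsulMeshGateway into a structs.ConsulMeshGateway.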
func TestConversion_apiConsulMeshGatewayToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiMeshGatewayToStructs(nil))
require.Equal(t, &structs.ConsulMeshGateway{Mode: "remote"},
apiMeshGatewayToStructs(&api.ConsulMeshGateway{Mode: "remote"}))
}
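// TestConversion_apiConnectSidecarServiceProxyToStructs asserts that a
// sidecar proxy block converts correctly; note the test expects an empty
// Config map to become nil on the structs side.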
func TestConversion_apiConnectSidecarServiceProxyToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiConnectSidecarServiceProxyToStructs(nil))
config := make(map[string]interface{})
require.Equal(t, &structs.ConsulProxy{
LocalServiceAddress: "192.168.30.1",
LocalServicePort: 9000,
Config: nil,
Upstreams: []structs.ConsulUpstream{{
DestinationName: "upstream",
}},
Expose: &structs.ConsulExposeConfig{
Paths: []structs.ConsulExposePath{{Path: "/health"}},
},
}, apiConnectSidecarServiceProxyToStructs(&api.ConsulProxy{
LocalServiceAddress: "192.168.30.1",
LocalServicePort: 9000,
Config: config,
Upstreams: []*api.ConsulUpstream{{
DestinationName: "upstream",
}},
ExposeConfig: &api.ConsulExposeConfig{
Path: []*api.ConsulExposePath{{
Path: "/health",
}},
},
}))
}
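// TestConversion_apiConnectSidecarServiceToStructs asserts the conversion of
// an api.ConsulSidecarService, including its nested proxy block.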
func TestConversion_apiConnectSidecarServiceToStructs(t *testing.T) {
t.Parallel()
require.Nil(t, apiConnectSidecarServiceToStructs(nil))
require.Equal(t, &structs.ConsulSidecarService{
Tags: []string{"foo"},
Port: "myPort",
Proxy: &structs.ConsulProxy{
LocalServiceAddress: "192.168.30.1",
},
}, apiConnectSidecarServiceToStructs(&api.ConsulSidecarService{
Tags: []string{"foo"},
Port: "myPort",
Proxy: &api.ConsulProxy{
LocalServiceAddress: "192.168.30.1",
},
}))
}
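// TestConversion_ApiConsulConnectToStructs covers the connect block variants:
// nil, sidecar, gateway proxy, ingress, terminating, mesh, and native.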
func TestConversion_ApiConsulConnectToStructs(t *testing.T) {
t.Parallel()
t.Run("nil", func(t *testing.T) {
require.Nil(t, ApiConsulConnectToStructs(nil))
})
t.Run("sidecar", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Native: false,
SidecarService: &structs.ConsulSidecarService{Port: "myPort"},
SidecarTask: &structs.SidecarTask{Name: "task"},
}, ApiConsulConnectToStructs(&api.ConsulConnect{
Native: false,
SidecarService: &api.ConsulSidecarService{Port: "myPort"},
SidecarTask: &api.SidecarTask{Name: "task"},
}))
})
t.Run("gateway proxy", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Gateway: &structs.ConsulGateway{
Proxy: &structs.ConsulGatewayProxy{
ConnectTimeout: helper.TimeToPtr(3 * time.Second),
EnvoyGatewayBindTaggedAddresses: true,
EnvoyGatewayBindAddresses: map[string]*structs.ConsulGatewayBindAddress{
"service": {
Address: "10.0.0.1",
Port: 9000,
}},
EnvoyGatewayNoDefaultBind: true,
EnvoyDNSDiscoveryType: "STRICT_DNS",
Config: map[string]interface{}{
"foo": "bar",
},
},
},
}, ApiConsulConnectToStructs(&api.ConsulConnect{
Gateway: &api.ConsulGateway{
Proxy: &api.ConsulGatewayProxy{
ConnectTimeout: helper.TimeToPtr(3 * time.Second),
EnvoyGatewayBindTaggedAddresses: true,
EnvoyGatewayBindAddresses: map[string]*api.ConsulGatewayBindAddress{
"service": {
Address: "10.0.0.1",
Port: 9000,
},
},
EnvoyGatewayNoDefaultBind: true,
EnvoyDNSDiscoveryType: "STRICT_DNS",
Config: map[string]interface{}{
"foo": "bar",
},
},
},
}))
})
t.Run("gateway ingress", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Gateway: &structs.ConsulGateway{
Ingress: &structs.ConsulIngressConfigEntry{
TLS: &structs.ConsulGatewayTLSConfig{Enabled: true},
Listeners: []*structs.ConsulIngressListener{{
Port: 1111,
Protocol: "http",
Services: []*structs.ConsulIngressService{{
Name: "ingress1",
Hosts: []string{"host1"},
}},
}},
},
},
}, ApiConsulConnectToStructs(
&api.ConsulConnect{
Gateway: &api.ConsulGateway{
Ingress: &api.ConsulIngressConfigEntry{
TLS: &api.ConsulGatewayTLSConfig{Enabled: true},
Listeners: []*api.ConsulIngressListener{{
Port: 1111,
Protocol: "http",
Services: []*api.ConsulIngressService{{
Name: "ingress1",
Hosts: []string{"host1"},
}},
}},
},
},
},
))
})
t.Run("gateway terminating", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Gateway: &structs.ConsulGateway{
Terminating: &structs.ConsulTerminatingConfigEntry{
Services: []*structs.ConsulLinkedService{{
Name: "linked-service",
CAFile: "ca.pem",
CertFile: "cert.pem",
KeyFile: "key.pem",
SNI: "linked.consul",
}},
},
},
}, ApiConsulConnectToStructs(&api.ConsulConnect{
Gateway: &api.ConsulGateway{
Terminating: &api.ConsulTerminatingConfigEntry{
Services: []*api.ConsulLinkedService{{
Name: "linked-service",
CAFile: "ca.pem",
CertFile: "cert.pem",
KeyFile: "key.pem",
SNI: "linked.consul",
}},
},
},
}))
})
t.Run("gateway mesh", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Gateway: &structs.ConsulGateway{
Mesh: &structs.ConsulMeshConfigEntry{
// nothing
},
},
}, ApiConsulConnectToStructs(&api.ConsulConnect{
Gateway: &api.ConsulGateway{
Mesh: &api.ConsulMeshConfigEntry{
// nothing
},
},
}))
})
t.Run("native", func(t *testing.T) {
require.Equal(t, &structs.ConsulConnect{
Native: true,
}, ApiConsulConnectToStructs(&api.ConsulConnect{
Native: true,
}))
})
}