package agent

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/golang/snappy"
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/jobspec"
	"github.com/hashicorp/nomad/jobspec2"
	"github.com/hashicorp/nomad/nomad/structs"
)
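// JobsRequest handles the jobs collection endpoint, dispatching GET requests
// to the job list handler and PUT/POST requests to job registration.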
func (s *HTTPServer) JobsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	switch req.Method {
	case "GET":
		return s.jobListRequest(resp, req)
	case "PUT", "POST":
		return s.jobUpdate(resp, req, "")
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
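// jobListRequest returns the list of job stubs for the request's region and
// namespace via the Job.List RPC.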
func (s *HTTPServer) jobListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	args := structs.JobListRequest{}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobListResponse
	if err := s.agent.RPC("Job.List", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Jobs == nil {
		out.Jobs = make([]*structs.JobListStub, 0)
	}
	return out.Jobs, nil
}
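// JobSpecificRequest routes requests for a single job to the handler matching
// the path suffix; bare job paths fall through to the CRUD handler.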
func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	path := strings.TrimPrefix(req.URL.Path, "/v1/job/")
	switch {
	case strings.HasSuffix(path, "/evaluate"):
		jobName := strings.TrimSuffix(path, "/evaluate")
		return s.jobForceEvaluate(resp, req, jobName)
	case strings.HasSuffix(path, "/allocations"):
		jobName := strings.TrimSuffix(path, "/allocations")
		return s.jobAllocations(resp, req, jobName)
	case strings.HasSuffix(path, "/evaluations"):
		jobName := strings.TrimSuffix(path, "/evaluations")
		return s.jobEvaluations(resp, req, jobName)
	case strings.HasSuffix(path, "/periodic/force"):
		jobName := strings.TrimSuffix(path, "/periodic/force")
		return s.periodicForceRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/plan"):
		jobName := strings.TrimSuffix(path, "/plan")
		return s.jobPlan(resp, req, jobName)
	case strings.HasSuffix(path, "/summary"):
		jobName := strings.TrimSuffix(path, "/summary")
		return s.jobSummaryRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/dispatch"):
		jobName := strings.TrimSuffix(path, "/dispatch")
		return s.jobDispatchRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/versions"):
		jobName := strings.TrimSuffix(path, "/versions")
		return s.jobVersions(resp, req, jobName)
	case strings.HasSuffix(path, "/revert"):
		jobName := strings.TrimSuffix(path, "/revert")
		return s.jobRevert(resp, req, jobName)
	case strings.HasSuffix(path, "/deployments"):
		jobName := strings.TrimSuffix(path, "/deployments")
		return s.jobDeployments(resp, req, jobName)
	case strings.HasSuffix(path, "/deployment"):
		jobName := strings.TrimSuffix(path, "/deployment")
		return s.jobLatestDeployment(resp, req, jobName)
	case strings.HasSuffix(path, "/stable"):
		jobName := strings.TrimSuffix(path, "/stable")
		return s.jobStable(resp, req, jobName)
	case strings.HasSuffix(path, "/scale"):
		jobName := strings.TrimSuffix(path, "/scale")
		return s.jobScale(resp, req, jobName)
	default:
		return s.jobCRUD(resp, req, path)
	}
}
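// jobForceEvaluate creates a new evaluation for the given job via the
// Job.Evaluate RPC; for backwards compatibility, a request without a body
// is accepted.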
func (s *HTTPServer) jobForceEvaluate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	var args structs.JobEvaluateRequest

	// TODO(preetha): remove in 0.9
	// COMPAT: For backwards compatibility allow using this endpoint without a payload
	if req.ContentLength == 0 {
		args = structs.JobEvaluateRequest{
			JobID: jobName,
		}
	} else {
		if err := decodeBody(req, &args); err != nil {
			return nil, CodedError(400, err.Error())
		}
		if args.JobID == "" {
			return nil, CodedError(400, "Job ID must be specified")
		}

		if jobName != "" && args.JobID != jobName {
			return nil, CodedError(400, "JobID not same as job name")
		}
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Evaluate", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
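// jobPlan performs a dry-run of a job registration via the Job.Plan RPC,
// returning the scheduler's placement decisions and an optional diff.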
func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var args api.JobPlanRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}
	if args.Job.ID == nil {
		return nil, CodedError(400, "Job must have a valid ID")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match")
	}

	sJob, writeReq := s.apiJobAndRequestToStructs(args.Job, req, args.WriteRequest)
	planReq := structs.JobPlanRequest{
		Job:            sJob,
		Diff:           args.Diff,
		PolicyOverride: args.PolicyOverride,
		WriteRequest:   *writeReq,
	}

	var out structs.JobPlanResponse
	if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
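// ValidateJobRequest validates a job definition via the Job.Validate RPC
// without registering it.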
func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure request method is POST or PUT
	if !(req.Method == "POST" || req.Method == "PUT") {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var validateRequest api.JobValidateRequest
	if err := decodeBody(req, &validateRequest); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if validateRequest.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}

	job := ApiJobToStructJob(validateRequest.Job)

	args := structs.JobValidateRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region: validateRequest.Region,
		},
	}
	s.parseWriteRequest(req, &args.WriteRequest)
	args.Namespace = job.Namespace

	var out structs.JobValidateResponse
	if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
		return nil, err
	}

	return out, nil
}
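// periodicForceRequest forces a new launch of a periodic job via the
// Periodic.Force RPC.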
func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.PeriodicForceRequest{
		JobID: jobName,
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.PeriodicForceResponse
	if err := s.agent.RPC("Periodic.Force", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
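// jobAllocations lists the allocations belonging to a job; the "all" query
// parameter includes allocations from all versions of the job.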
func (s *HTTPServer) jobAllocations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	allAllocs, _ := strconv.ParseBool(req.URL.Query().Get("all"))

	args := structs.JobSpecificRequest{
		JobID: jobName,
		All:   allAllocs,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobAllocationsResponse
	if err := s.agent.RPC("Job.Allocations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Allocations == nil {
		out.Allocations = make([]*structs.AllocListStub, 0)
	}
	for _, alloc := range out.Allocations {
		alloc.SetEventDisplayMessages()
	}
	return out.Allocations, nil
}
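// jobEvaluations lists the evaluations created for a job via the
// Job.Evaluations RPC.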
func (s *HTTPServer) jobEvaluations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobEvaluationsResponse
	if err := s.agent.RPC("Job.Evaluations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Evaluations == nil {
		out.Evaluations = make([]*structs.Evaluation, 0)
	}
	return out.Evaluations, nil
}
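// jobDeployments lists the deployments for a job; the "all" query parameter
// includes deployments from all versions of the job.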
func (s *HTTPServer) jobDeployments(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	all, _ := strconv.ParseBool(req.URL.Query().Get("all"))
	args := structs.JobSpecificRequest{
		JobID: jobName,
		All:   all,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.DeploymentListResponse
	if err := s.agent.RPC("Job.Deployments", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Deployments == nil {
		out.Deployments = make([]*structs.Deployment, 0)
	}
	return out.Deployments, nil
}
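// jobLatestDeployment returns the most recent deployment for a job via the
// Job.LatestDeployment RPC.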
func (s *HTTPServer) jobLatestDeployment(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.SingleDeploymentResponse
	if err := s.agent.RPC("Job.LatestDeployment", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	return out.Deployment, nil
}
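// jobCRUD dispatches job read, register, and deregister operations based on
// the HTTP method.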
func (s *HTTPServer) jobCRUD(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	switch req.Method {
	case "GET":
		return s.jobQuery(resp, req, jobName)
	case "PUT", "POST":
		return s.jobUpdate(resp, req, jobName)
	case "DELETE":
		return s.jobDelete(resp, req, jobName)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
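// jobQuery fetches a single job via the Job.GetJob RPC, decompressing the
// snappy-encoded dispatch payload before returning it.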
func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.SingleJobResponse
	if err := s.agent.RPC("Job.GetJob", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Job == nil {
		return nil, CodedError(404, "job not found")
	}

	// Decode the payload if there is any
	job := out.Job
	if len(job.Payload) != 0 {
		decoded, err := snappy.Decode(nil, out.Job.Payload)
		if err != nil {
			return nil, err
		}
		job = job.Copy()
		job.Payload = decoded
	}

	return job, nil
}
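// jobUpdate registers or updates a job via the Job.Register RPC after
// validating the request body against the job name in the URL.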
func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	var args api.JobRegisterRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}

	if args.Job.ID == nil {
		return nil, CodedError(400, "Job ID hasn't been provided")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match name")
	}

	// GH-8481. Jobs of type system can only have a count of 1 and therefore do
	// not support scaling. Even though this returns an error on the first
	// occurrence, the error is generic but detailed enough that an operator
	// can fix the problem across multiple task groups.
	if args.Job.Type != nil && *args.Job.Type == api.JobTypeSystem {
		for _, tg := range args.Job.TaskGroups {
			if tg.Scaling != nil {
				return nil, CodedError(400, "Task groups with job type system do not support scaling stanzas")
			}
		}
	}

	sJob, writeReq := s.apiJobAndRequestToStructs(args.Job, req, args.WriteRequest)
	regReq := structs.JobRegisterRequest{
		Job:            sJob,
		EnforceIndex:   args.EnforceIndex,
		JobModifyIndex: args.JobModifyIndex,
		PolicyOverride: args.PolicyOverride,
		PreserveCounts: args.PreserveCounts,
		WriteRequest:   *writeReq,
	}

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Register", &regReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
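// jobDelete deregisters a job via the Job.Deregister RPC, honoring the
// "purge" and "global" query parameters.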
func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	purgeStr := req.URL.Query().Get("purge")
	var purgeBool bool
	if purgeStr != "" {
		var err error
		purgeBool, err = strconv.ParseBool(purgeStr)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "purge", purgeStr, err)
		}
	}

	globalStr := req.URL.Query().Get("global")
	var globalBool bool
	if globalStr != "" {
		var err error
		globalBool, err = strconv.ParseBool(globalStr)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "global", globalStr, err)
		}
	}

	args := structs.JobDeregisterRequest{
		JobID:  jobName,
		Purge:  purgeBool,
		Global: globalBool,
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.JobDeregisterResponse
	if err := s.agent.RPC("Job.Deregister", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
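// jobScale dispatches scale endpoint requests: GET returns scale status,
// PUT/POST performs a scaling action.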
func (s *HTTPServer) jobScale(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	switch req.Method {
	case "GET":
		return s.jobScaleStatus(resp, req, jobName)
	case "PUT", "POST":
		return s.jobScaleAction(resp, req, jobName)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
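// jobScaleStatus returns the current scale status of a job via the
// Job.ScaleStatus RPC.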
func (s *HTTPServer) jobScaleStatus(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	args := structs.JobScaleStatusRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobScaleStatusResponse
	if err := s.agent.RPC("Job.ScaleStatus", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.JobScaleStatus == nil {
		return nil, CodedError(404, "job not found")
	}

	return out.JobScaleStatus, nil
}
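// jobScaleAction applies a scaling request to a job's task group via the
// Job.Scale RPC, checking that the target job in the payload matches the URL.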
func (s *HTTPServer) jobScaleAction(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var args api.ScalingRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}

	targetJob := args.Target[structs.ScalingTargetJob]
	if targetJob != "" && targetJob != jobName {
		return nil, CodedError(400, "job ID in payload did not match URL")
	}

	scaleReq := structs.JobScaleRequest{
		JobID:          jobName,
		Target:         args.Target,
		Count:          args.Count,
		PolicyOverride: args.PolicyOverride,
		Message:        args.Message,
		Error:          args.Error,
		Meta:           args.Meta,
	}
	// parseWriteRequest overrides Namespace, Region and AuthToken
	// based on values from the original http request
	s.parseWriteRequest(req, &scaleReq.WriteRequest)

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Scale", &scaleReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
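// jobVersions lists all stored versions of a job; the "diffs" query parameter
// requests diffs between successive versions.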
func (s *HTTPServer) jobVersions(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	diffsStr := req.URL.Query().Get("diffs")
	var diffsBool bool
	if diffsStr != "" {
		var err error
		diffsBool, err = strconv.ParseBool(diffsStr)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "diffs", diffsStr, err)
		}
	}

	args := structs.JobVersionsRequest{
		JobID: jobName,
		Diffs: diffsBool,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobVersionsResponse
	if err := s.agent.RPC("Job.GetJobVersions", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if len(out.Versions) == 0 {
		return nil, CodedError(404, "job versions not found")
	}

	return out, nil
}
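// jobRevert reverts a job to a prior version via the Job.Revert RPC.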
func (s *HTTPServer) jobRevert(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var revertRequest structs.JobRevertRequest
	if err := decodeBody(req, &revertRequest); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if revertRequest.JobID == "" {
		return nil, CodedError(400, "JobID must be specified")
	}
	if revertRequest.JobID != jobName {
		return nil, CodedError(400, "Job ID does not match")
	}

	s.parseWriteRequest(req, &revertRequest.WriteRequest)

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Revert", &revertRequest, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	return out, nil
}
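// jobStable marks a job version as stable or unstable via the Job.Stable RPC.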
func (s *HTTPServer) jobStable(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var stableRequest structs.JobStabilityRequest
	if err := decodeBody(req, &stableRequest); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if stableRequest.JobID == "" {
		return nil, CodedError(400, "JobID must be specified")
	}
	if stableRequest.JobID != jobName {
		return nil, CodedError(400, "Job ID does not match")
	}

	s.parseWriteRequest(req, &stableRequest.WriteRequest)

	var out structs.JobStabilityResponse
	if err := s.agent.RPC("Job.Stable", &stableRequest, &out); err != nil {
		return nil, err
	}

	setIndex(resp, out.Index)
	return out, nil
}
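// jobSummaryRequest returns the summary of a job's allocation states via the
// Job.Summary RPC.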
func (s *HTTPServer) jobSummaryRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
	args := structs.JobSummaryRequest{
		JobID: name,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobSummaryResponse
	if err := s.agent.RPC("Job.Summary", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.JobSummary == nil {
		return nil, CodedError(404, "job not found")
	}
	setIndex(resp, out.Index)
	return out.JobSummary, nil
}
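// jobDispatchRequest dispatches an instance of a parameterized job via the
// Job.Dispatch RPC.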
func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobDispatchRequest{}
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.JobID != "" && args.JobID != name {
		return nil, CodedError(400, "Job ID does not match")
	}
	if args.JobID == "" {
		args.JobID = name
	}

	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.JobDispatchResponse
	if err := s.agent.RPC("Job.Dispatch", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
// JobsParseRequest parses an HCL jobspec and returns an api.Job
func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	if req.Method != http.MethodPut && req.Method != http.MethodPost {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := &api.JobsParseRequest{}
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.JobHCL == "" {
		return nil, CodedError(400, "Job spec is empty")
	}

	var jobStruct *api.Job
	var err error
	if args.HCLv1 {
		jobStruct, err = jobspec.Parse(strings.NewReader(args.JobHCL))
	} else {
		jobStruct, err = jobspec2.ParseWithConfig(&jobspec2.ParseConfig{
			Path:    "input.hcl",
			Body:    []byte(args.JobHCL),
			AllowFS: false,
		})
	}
	if err != nil {
		return nil, CodedError(400, err.Error())
	}

	if args.Canonicalize {
		jobStruct.Canonicalize()
	}
	return jobStruct, nil
}
// apiJobAndRequestToStructs parses the query params from the incoming
// request and converts to a structs.Job and WriteRequest with the
// region and namespace set appropriately.
func (s *HTTPServer) apiJobAndRequestToStructs(job *api.Job, req *http.Request, apiReq api.WriteRequest) (*structs.Job, *structs.WriteRequest) {

	// parseWriteRequest gets the Namespace, Region, and AuthToken from
	// the original HTTP request's query params and headers and overrides
	// those values set in the request body
	writeReq := &structs.WriteRequest{
		Namespace: apiReq.Namespace,
		Region:    apiReq.Region,
		AuthToken: apiReq.SecretID,
	}

	queryRegion := req.URL.Query().Get("region")
	s.parseToken(req, &writeReq.AuthToken)
	parseNamespace(req, &writeReq.Namespace)

	requestRegion, jobRegion := regionForJob(
		job, queryRegion, writeReq.Region, s.agent.config.Region,
	)

	sJob := ApiJobToStructJob(job)
	sJob.Region = jobRegion
	writeReq.Region = requestRegion
	writeReq.Namespace = sJob.Namespace

	return sJob, writeReq
}
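// regionForJob resolves the region the request should be forwarded to and the
// region to record on the job, reconciling the query parameter, the request
// body, the job's own region, and the agent's region.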
func regionForJob(job *api.Job, queryRegion, apiRegion, agentRegion string) (string, string) {
	var requestRegion string
	var jobRegion string

	// Region in query param (-region flag) takes precedence.
	if queryRegion != "" {
		requestRegion = queryRegion
		jobRegion = queryRegion
	}

	// Next the request body...
	if apiRegion != "" {
		requestRegion = apiRegion
		jobRegion = apiRegion
	}

	// If no query param was passed, we forward to the job's region
	// if one is available
	if requestRegion == "" && job.Region != nil {
		requestRegion = *job.Region
		jobRegion = *job.Region
	}

	// otherwise we default to the region of this node
	if requestRegion == "" || requestRegion == api.GlobalRegion {
		requestRegion = agentRegion
		jobRegion = agentRegion
	}

	// Multiregion jobs have their job region set to the global region,
	// and enforce that we forward to a region where they will be deployed
	if job.Multiregion != nil {
		jobRegion = api.GlobalRegion

		// multiregion jobs with 0 regions won't pass validation,
		// but this protects us from NPE
		if len(job.Multiregion.Regions) > 0 {
			found := false
			for _, region := range job.Multiregion.Regions {
				if region.Name == requestRegion {
					found = true
				}
			}
			if !found {
				requestRegion = job.Multiregion.Regions[0].Name
			}
		}
	}

	return requestRegion, jobRegion
}
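// ApiJobToStructJob converts an api.Job into the equivalent structs.Job,
// canonicalizing the API job first so pointer fields are populated.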
func ApiJobToStructJob(job *api.Job) *structs.Job {
	job.Canonicalize()

	j := &structs.Job{
		Stop:           *job.Stop,
		Region:         *job.Region,
		Namespace:      *job.Namespace,
		ID:             *job.ID,
		ParentID:       *job.ParentID,
		Name:           *job.Name,
		Type:           *job.Type,
		Priority:       *job.Priority,
		AllAtOnce:      *job.AllAtOnce,
		Datacenters:    job.Datacenters,
		Payload:        job.Payload,
		Meta:           job.Meta,
		ConsulToken:    *job.ConsulToken,
		VaultToken:     *job.VaultToken,
		VaultNamespace: *job.VaultNamespace,
		Constraints:    ApiConstraintsToStructs(job.Constraints),
		Affinities:     ApiAffinitiesToStructs(job.Affinities),
	}

	// Update has been pushed into the task groups. stagger and max_parallel are
	// preserved at the job level, but all other values are discarded. The job.Update
	// api value is merged into TaskGroups already in api.Canonicalize
	if job.Update != nil && job.Update.MaxParallel != nil && *job.Update.MaxParallel > 0 {
		j.Update = structs.UpdateStrategy{}

		if job.Update.Stagger != nil {
			j.Update.Stagger = *job.Update.Stagger
		}
		if job.Update.MaxParallel != nil {
			j.Update.MaxParallel = *job.Update.MaxParallel
		}
	}

	if l := len(job.Spreads); l != 0 {
		j.Spreads = make([]*structs.Spread, l)
		for i, apiSpread := range job.Spreads {
			j.Spreads[i] = ApiSpreadToStructs(apiSpread)
		}
	}

	if job.Periodic != nil {
		j.Periodic = &structs.PeriodicConfig{
			Enabled:         *job.Periodic.Enabled,
			SpecType:        *job.Periodic.SpecType,
			ProhibitOverlap: *job.Periodic.ProhibitOverlap,
			TimeZone:        *job.Periodic.TimeZone,
		}

		if job.Periodic.Spec != nil {
			j.Periodic.Spec = *job.Periodic.Spec
		}
	}

	if job.ParameterizedJob != nil {
		j.ParameterizedJob = &structs.ParameterizedJobConfig{
			Payload:      job.ParameterizedJob.Payload,
			MetaRequired: job.ParameterizedJob.MetaRequired,
			MetaOptional: job.ParameterizedJob.MetaOptional,
		}
	}

	if job.Multiregion != nil {
		j.Multiregion = &structs.Multiregion{}
		j.Multiregion.Strategy = &structs.MultiregionStrategy{
			MaxParallel: *job.Multiregion.Strategy.MaxParallel,
			OnFailure:   *job.Multiregion.Strategy.OnFailure,
		}
		j.Multiregion.Regions = []*structs.MultiregionRegion{}
		for _, region := range job.Multiregion.Regions {
			r := &structs.MultiregionRegion{}
			r.Name = region.Name
			r.Count = *region.Count
			r.Datacenters = region.Datacenters
			r.Meta = region.Meta
			j.Multiregion.Regions = append(j.Multiregion.Regions, r)
		}
	}

	if l := len(job.TaskGroups); l != 0 {
		j.TaskGroups = make([]*structs.TaskGroup, l)
		for i, taskGroup := range job.TaskGroups {
			tg := &structs.TaskGroup{}
			ApiTgToStructsTG(j, taskGroup, tg)
			j.TaskGroups[i] = tg
		}
	}

	return j
}
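// ApiTgToStructsTG converts an api.TaskGroup into the given structs.TaskGroup.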
func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
	tg.Name = *taskGroup.Name
	tg.Count = *taskGroup.Count
	tg.Meta = taskGroup.Meta
	tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints)
	tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
	tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
	tg.Services = ApiServicesToStructs(taskGroup.Services)
	tg.Consul = apiConsulToStructs(taskGroup.Consul)

	tg.RestartPolicy = &structs.RestartPolicy{
		Attempts: *taskGroup.RestartPolicy.Attempts,
		Interval: *taskGroup.RestartPolicy.Interval,
		Delay:    *taskGroup.RestartPolicy.Delay,
		Mode:     *taskGroup.RestartPolicy.Mode,
	}

	if taskGroup.ShutdownDelay != nil {
		tg.ShutdownDelay = taskGroup.ShutdownDelay
	}

	if taskGroup.StopAfterClientDisconnect != nil {
		tg.StopAfterClientDisconnect = taskGroup.StopAfterClientDisconnect
	}

	if taskGroup.ReschedulePolicy != nil {
		tg.ReschedulePolicy = &structs.ReschedulePolicy{
			Attempts:      *taskGroup.ReschedulePolicy.Attempts,
			Interval:      *taskGroup.ReschedulePolicy.Interval,
			Delay:         *taskGroup.ReschedulePolicy.Delay,
			DelayFunction: *taskGroup.ReschedulePolicy.DelayFunction,
			MaxDelay:      *taskGroup.ReschedulePolicy.MaxDelay,
			Unlimited:     *taskGroup.ReschedulePolicy.Unlimited,
		}
	}

	if taskGroup.Migrate != nil {
		tg.Migrate = &structs.MigrateStrategy{
			MaxParallel:     *taskGroup.Migrate.MaxParallel,
			HealthCheck:     *taskGroup.Migrate.HealthCheck,
			MinHealthyTime:  *taskGroup.Migrate.MinHealthyTime,
			HealthyDeadline: *taskGroup.Migrate.HealthyDeadline,
		}
	}

	if taskGroup.Scaling != nil {
		tg.Scaling = ApiScalingPolicyToStructs(tg.Count, taskGroup.Scaling).TargetTaskGroup(job, tg)
	}

	tg.EphemeralDisk = &structs.EphemeralDisk{
		Sticky:  *taskGroup.EphemeralDisk.Sticky,
		SizeMB:  *taskGroup.EphemeralDisk.SizeMB,
		Migrate: *taskGroup.EphemeralDisk.Migrate,
	}

	if l := len(taskGroup.Spreads); l != 0 {
		tg.Spreads = make([]*structs.Spread, l)
		for k, spread := range taskGroup.Spreads {
			tg.Spreads[k] = ApiSpreadToStructs(spread)
		}
	}

	if l := len(taskGroup.Volumes); l != 0 {
		tg.Volumes = make(map[string]*structs.VolumeRequest, l)
		for k, v := range taskGroup.Volumes {
			if v.Type != structs.VolumeTypeHost && v.Type != structs.VolumeTypeCSI {
				// Ignore volume types we don't understand, since we don't
				// currently have a way to return errors here.
				continue
			}

			vol := &structs.VolumeRequest{
				Name:           v.Name,
				Type:           v.Type,
				ReadOnly:       v.ReadOnly,
				Source:         v.Source,
				AttachmentMode: structs.CSIVolumeAttachmentMode(v.AttachmentMode),
				AccessMode:     structs.CSIVolumeAccessMode(v.AccessMode),
				PerAlloc:       v.PerAlloc,
			}

			if v.MountOptions != nil {
				vol.MountOptions = &structs.CSIMountOptions{
					FSType:     v.MountOptions.FSType,
					MountFlags: v.MountOptions.MountFlags,
				}
			}

			tg.Volumes[k] = vol
		}
	}

	if taskGroup.Update != nil {
		tg.Update = &structs.UpdateStrategy{
			Stagger:          *taskGroup.Update.Stagger,
			MaxParallel:      *taskGroup.Update.MaxParallel,
			HealthCheck:      *taskGroup.Update.HealthCheck,
			MinHealthyTime:   *taskGroup.Update.MinHealthyTime,
			HealthyDeadline:  *taskGroup.Update.HealthyDeadline,
			ProgressDeadline: *taskGroup.Update.ProgressDeadline,
			Canary:           *taskGroup.Update.Canary,
		}

		// boolPtr fields may be nil, others will have pointers to default values via Canonicalize
		if taskGroup.Update.AutoRevert != nil {
			tg.Update.AutoRevert = *taskGroup.Update.AutoRevert
		}

		if taskGroup.Update.AutoPromote != nil {
			tg.Update.AutoPromote = *taskGroup.Update.AutoPromote
		}
	}

	if l := len(taskGroup.Tasks); l != 0 {
		tg.Tasks = make([]*structs.Task, l)
		for l, task := range taskGroup.Tasks {
			t := &structs.Task{}
			ApiTaskToStructsTask(job, tg, task, t)

			// Set the task's Vault namespace from the job if it was not
			// specified by the task or group
			if t.Vault != nil && t.Vault.Namespace == "" && job.VaultNamespace != "" {
				t.Vault.Namespace = job.VaultNamespace
			}
			tg.Tasks[l] = t
		}
	}
}
// ApiTaskToStructsTask is a copy and type conversion between the API
// representation of a task and the struct representation of a task.
func ApiTaskToStructsTask(job *structs.Job, group *structs.TaskGroup,
	apiTask *api.Task, structsTask *structs.Task) {

	structsTask.Name = apiTask.Name
	structsTask.Driver = apiTask.Driver
	structsTask.User = apiTask.User
	structsTask.Leader = apiTask.Leader
	structsTask.Config = apiTask.Config
	structsTask.Env = apiTask.Env
	structsTask.Meta = apiTask.Meta
	structsTask.KillTimeout = *apiTask.KillTimeout
	structsTask.ShutdownDelay = apiTask.ShutdownDelay
	structsTask.KillSignal = apiTask.KillSignal
	structsTask.Kind = structs.TaskKind(apiTask.Kind)
	structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints)
	structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities)
	structsTask.CSIPluginConfig = ApiCSIPluginConfigToStructsCSIPluginConfig(apiTask.CSIPluginConfig)

	if apiTask.RestartPolicy != nil {
		structsTask.RestartPolicy = &structs.RestartPolicy{
			Attempts: *apiTask.RestartPolicy.Attempts,
			Interval: *apiTask.RestartPolicy.Interval,
			Delay:    *apiTask.RestartPolicy.Delay,
			Mode:     *apiTask.RestartPolicy.Mode,
		}
	}

	if l := len(apiTask.VolumeMounts); l != 0 {
		structsTask.VolumeMounts = make([]*structs.VolumeMount, l)
		for i, mount := range apiTask.VolumeMounts {
			structsTask.VolumeMounts[i] = &structs.VolumeMount{
				Volume:          *mount.Volume,
				Destination:     *mount.Destination,
				ReadOnly:        *mount.ReadOnly,
				PropagationMode: *mount.PropagationMode,
			}
		}
	}

	if l := len(apiTask.ScalingPolicies); l != 0 {
		structsTask.ScalingPolicies = make([]*structs.ScalingPolicy, l)
		for i, policy := range apiTask.ScalingPolicies {
			structsTask.ScalingPolicies[i] = ApiScalingPolicyToStructs(0, policy).TargetTask(job, group, structsTask)
		}
	}

	if l := len(apiTask.Services); l != 0 {
		structsTask.Services = make([]*structs.Service, l)
		for i, service := range apiTask.Services {
			structsTask.Services[i] = &structs.Service{
				Name:              service.Name,
				PortLabel:         service.PortLabel,
				Tags:              service.Tags,
				CanaryTags:        service.CanaryTags,
				EnableTagOverride: service.EnableTagOverride,
				AddressMode:       service.AddressMode,
				Meta:              helper.CopyMapStringString(service.Meta),
				CanaryMeta:        helper.CopyMapStringString(service.CanaryMeta),
				OnUpdate:          service.OnUpdate,
			}

			if l := len(service.Checks); l != 0 {
				structsTask.Services[i].Checks = make([]*structs.ServiceCheck, l)
				for j, check := range service.Checks {
					onUpdate := service.OnUpdate // Inherit from service as default
					if check.OnUpdate != "" {
						onUpdate = check.OnUpdate
					}
					structsTask.Services[i].Checks[j] = &structs.ServiceCheck{
						Name:                   check.Name,
						Type:                   check.Type,
						Command:                check.Command,
						Args:                   check.Args,
						Path:                   check.Path,
						Protocol:               check.Protocol,
						PortLabel:              check.PortLabel,
						AddressMode:            check.AddressMode,
						Interval:               check.Interval,
						Timeout:                check.Timeout,
						InitialStatus:          check.InitialStatus,
						TLSSkipVerify:          check.TLSSkipVerify,
						Header:                 check.Header,
						Method:                 check.Method,
						Body:                   check.Body,
						GRPCService:            check.GRPCService,
						GRPCUseTLS:             check.GRPCUseTLS,
						SuccessBeforePassing:   check.SuccessBeforePassing,
						FailuresBeforeCritical: check.FailuresBeforeCritical,
						OnUpdate:               onUpdate,
					}
					if check.CheckRestart != nil {
						structsTask.Services[i].Checks[j].CheckRestart = &structs.CheckRestart{
							Limit:          check.CheckRestart.Limit,
							Grace:          *check.CheckRestart.Grace,
							IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
						}
					}
				}
			}

			// Task services can't have a connect block. We still convert it so that
			// we can later return a validation error.
			if service.Connect != nil {
				structsTask.Services[i].Connect = ApiConsulConnectToStructs(service.Connect)
			}
		}
	}

	structsTask.Resources = ApiResourcesToStructs(apiTask.Resources)

	structsTask.LogConfig = &structs.LogConfig{
		MaxFiles:      *apiTask.LogConfig.MaxFiles,
		MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
	}

	if l := len(apiTask.Artifacts); l != 0 {
		structsTask.Artifacts = make([]*structs.TaskArtifact, l)
		for k, ta := range apiTask.Artifacts {
			structsTask.Artifacts[k] = &structs.TaskArtifact{
				GetterSource:  *ta.GetterSource,
				GetterOptions: helper.CopyMapStringString(ta.GetterOptions),
				GetterHeaders: helper.CopyMapStringString(ta.GetterHeaders),
				GetterMode:    *ta.GetterMode,
				RelativeDest:  *ta.RelativeDest,
			}
		}
	}

	if apiTask.Vault != nil {
		structsTask.Vault = &structs.Vault{
			Policies:     apiTask.Vault.Policies,
			Namespace:    *apiTask.Vault.Namespace,
			Env:          *apiTask.Vault.Env,
			ChangeMode:   *apiTask.Vault.ChangeMode,
			ChangeSignal: *apiTask.Vault.ChangeSignal,
		}
	}

	if l := len(apiTask.Templates); l != 0 {
		structsTask.Templates = make([]*structs.Template, l)
		for i, template := range apiTask.Templates {
			structsTask.Templates[i] = &structs.Template{
				SourcePath:   *template.SourcePath,
				DestPath:     *template.DestPath,
				EmbeddedTmpl: *template.EmbeddedTmpl,
				ChangeMode:   *template.ChangeMode,
				ChangeSignal: *template.ChangeSignal,
				Splay:        *template.Splay,
				Perms:        *template.Perms,
				LeftDelim:    *template.LeftDelim,
				RightDelim:   *template.RightDelim,
				Envvars:      *template.Envvars,
				VaultGrace:   *template.VaultGrace,
			}
		}
	}

	if apiTask.DispatchPayload != nil {
		structsTask.DispatchPayload = &structs.DispatchPayloadConfig{
			File: apiTask.DispatchPayload.File,
		}
	}

	if apiTask.Lifecycle != nil {
		structsTask.Lifecycle = &structs.TaskLifecycleConfig{
			Hook:    apiTask.Lifecycle.Hook,
			Sidecar: apiTask.Lifecycle.Sidecar,
		}
	}
}
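// ApiCSIPluginConfigToStructsCSIPluginConfig converts an api.TaskCSIPluginConfig
// into its structs equivalent, returning nil for a nil input.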
func ApiCSIPluginConfigToStructsCSIPluginConfig(apiConfig *api.TaskCSIPluginConfig) *structs.TaskCSIPluginConfig {
	if apiConfig == nil {
		return nil
	}

	sc := &structs.TaskCSIPluginConfig{}
	sc.ID = apiConfig.ID
	sc.Type = structs.CSIPluginType(apiConfig.Type)
	sc.MountDir = apiConfig.MountDir
	return sc
}
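// ApiResourcesToStructs converts an api.Resources into a structs.Resources,
// returning nil for a nil input.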
func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
	if in == nil {
		return nil
	}

	out := &structs.Resources{
		CPU:      *in.CPU,
		MemoryMB: *in.MemoryMB,
	}

	if in.Cores != nil {
		out.Cores = *in.Cores
	}

	if in.MemoryMaxMB != nil {
		out.MemoryMaxMB = *in.MemoryMaxMB
	}

	// COMPAT(0.10): Only being used to issue warnings
	if in.IOPS != nil {
		out.IOPS = *in.IOPS
	}

	if len(in.Networks) != 0 {
		out.Networks = ApiNetworkResourceToStructs(in.Networks)
	}

	if l := len(in.Devices); l != 0 {
		out.Devices = make([]*structs.RequestedDevice, l)
		for i, d := range in.Devices {
			out.Devices[i] = &structs.RequestedDevice{
				Name:        d.Name,
				Count:       *d.Count,
				Constraints: ApiConstraintsToStructs(d.Constraints),
				Affinities:  ApiAffinitiesToStructs(d.Affinities),
			}
		}
	}

	return out
}
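// ApiNetworkResourceToStructs converts a slice of api.NetworkResource into
// the equivalent structs slice.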
func ApiNetworkResourceToStructs(in []*api.NetworkResource) []*structs.NetworkResource {
|
|
|
|
var out []*structs.NetworkResource
|
2019-05-08 17:56:15 +00:00
|
|
|
if len(in) == 0 {
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
out = make([]*structs.NetworkResource, len(in))
|
|
|
|
for i, nw := range in {
|
|
|
|
out[i] = &structs.NetworkResource{
|
|
|
|
Mode: nw.Mode,
|
|
|
|
CIDR: nw.CIDR,
|
|
|
|
IP: nw.IP,
|
2020-09-16 17:35:51 +00:00
|
|
|
MBits: nw.Megabits(),
|
2019-05-08 17:56:15 +00:00
|
|
|
}
|
2018-10-08 22:38:03 +00:00
|
|
|
|
2020-04-28 03:11:06 +00:00
|
|
|
if nw.DNS != nil {
|
|
|
|
out[i].DNS = &structs.DNSConfig{
|
|
|
|
Servers: nw.DNS.Servers,
|
|
|
|
Searches: nw.DNS.Searches,
|
|
|
|
Options: nw.DNS.Options,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-08 17:56:15 +00:00
|
|
|
if l := len(nw.DynamicPorts); l != 0 {
|
|
|
|
out[i].DynamicPorts = make([]structs.Port, l)
|
|
|
|
for j, dp := range nw.DynamicPorts {
|
2020-06-16 15:53:10 +00:00
|
|
|
out[i].DynamicPorts[j] = ApiPortToStructs(dp)
|
2018-10-08 22:38:03 +00:00
|
|
|
}
|
2019-05-08 17:56:15 +00:00
|
|
|
}
|
2018-10-08 22:38:03 +00:00
|
|
|
|
2019-05-08 17:56:15 +00:00
|
|
|
if l := len(nw.ReservedPorts); l != 0 {
|
|
|
|
out[i].ReservedPorts = make([]structs.Port, l)
|
|
|
|
for j, rp := range nw.ReservedPorts {
|
2020-06-16 15:53:10 +00:00
|
|
|
out[i].ReservedPorts[j] = ApiPortToStructs(rp)
|
2018-10-08 22:38:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
2020-06-16 15:53:10 +00:00
|
|
|
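// ApiPortToStructs converts a single api.Port to a structs.Port. A minimal
// sketch of the field mapping (hypothetical values):
//
//	ApiPortToStructs(api.Port{Label: "http", Value: 8080, To: 8080})
//	// => structs.Port{Label: "http", Value: 8080, To: 8080, HostNetwork: ""}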
func ApiPortToStructs(in api.Port) structs.Port {
|
|
|
|
return structs.Port{
|
|
|
|
Label: in.Label,
|
|
|
|
Value: in.Value,
|
|
|
|
To: in.To,
|
|
|
|
HostNetwork: in.HostNetwork,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-24 15:29:26 +00:00
|
|
|
//TODO(schmichael) refactor and reuse in service parsing above
|
|
|
|
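// ApiServicesToStructs converts service definitions from the api package
// representation to the structs form, including checks, check_restart
// blocks, and any Consul Connect configuration.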
func ApiServicesToStructs(in []*api.Service) []*structs.Service {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
out := make([]*structs.Service, len(in))
|
|
|
|
for i, s := range in {
|
|
|
|
out[i] = &structs.Service{
|
client: enable configuring enable_tag_override for services
Consul provides a feature of Service Definitions where the tags
associated with a service can be modified through the Catalog API,
overriding the value(s) configured in the agent's service configuration.
To enable this feature, the flag enable_tag_override must be configured
in the service definition.
Previously, Nomad did not allow configuring this flag, and thus the default
value of false was used. Now, it is configurable.
Because Nomad itself acts as a state machine around the service definitions
of the tasks it manages, it's worth describing what happens when this feature
is enabled and why.
Consider the basic case where there is no Nomad, and your service is provided
to Consul as a boring JSON file. The ultimate source of truth for the definition
of that service is the file, and is stored in the agent. Later, Consul performs
"anti-entropy", which synchronizes the Catalog (stored only on the leaders). Then
with enable_tag_override=true, the tags field is available for "external"
modification through the Catalog API (rather than directly configuring the
service definition file, or using the Agent API). The important observation
is that if the service definition ever changes (i.e. the file is changed &
config reloaded OR the Agent API is used to modify the service), those
"external" tag values are thrown away, and the new service definition is
once again the source of truth.
In the Nomad case, Nomad itself is the source of truth over the Agent in
the same way the JSON file was the source of truth in the example above.
That means any time Nomad sets a new service definition, any externally
configured tags are going to be replaced. When does this happen? Only on
major lifecycle events, for example when a task is modified because of an
updated job spec from the 'nomad job run <existing>' command. Otherwise,
Nomad's periodic re-syncs with Consul will no longer try to restore
the externally modified tag values (as long as enable_tag_override=true).
Fixes #2057
2020-02-07 21:22:19 +00:00
|
|
|
Name: s.Name,
|
|
|
|
PortLabel: s.PortLabel,
|
2020-06-22 17:55:59 +00:00
|
|
|
TaskName: s.TaskName,
|
2020-02-07 21:22:19 +00:00
|
|
|
Tags: s.Tags,
|
|
|
|
CanaryTags: s.CanaryTags,
|
|
|
|
EnableTagOverride: s.EnableTagOverride,
|
|
|
|
AddressMode: s.AddressMode,
|
|
|
|
Meta: helper.CopyMapStringString(s.Meta),
|
|
|
|
CanaryMeta: helper.CopyMapStringString(s.CanaryMeta),
|
2021-01-22 19:45:26 +00:00
|
|
|
OnUpdate: s.OnUpdate,
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if l := len(s.Checks); l != 0 {
|
|
|
|
out[i].Checks = make([]*structs.ServiceCheck, l)
|
|
|
|
for j, check := range s.Checks {
|
2021-01-22 19:45:26 +00:00
|
|
|
onUpdate := s.OnUpdate // Inherit from service as default
|
|
|
|
if check.OnUpdate != "" {
|
|
|
|
onUpdate = check.OnUpdate
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
out[i].Checks[j] = &structs.ServiceCheck{
|
|
|
|
Name: check.Name,
|
|
|
|
Type: check.Type,
|
|
|
|
Command: check.Command,
|
|
|
|
Args: check.Args,
|
|
|
|
Path: check.Path,
|
|
|
|
Protocol: check.Protocol,
|
|
|
|
PortLabel: check.PortLabel,
|
connect: enable automatic expose paths for individual group service checks
Part of #6120
Building on the support for enabling connect proxy paths in #7323, this change
adds the ability to configure the 'service.check.expose' flag on group-level
service check definitions for services that are connect-enabled. This is a slight
deviation from the "magic" that Consul provides. With Consul, the 'expose' flag
exists on the connect.proxy stanza, which will then auto-generate expose paths
for every HTTP and gRPC service check associated with that connect-enabled
service.
A first attempt at providing similar magic for Nomad's Consul Connect integration
followed that pattern exactly, as seen in #7396. However, on reviewing the PR
we realized having the `expose` flag on the proxy stanza inseparably ties together
the automatic path generation with every HTTP/gRPC check defined on the service. This
makes sense in Consul's context, because a service definition is reasonably
associated with a single "task". With Nomad's group level service definitions
however, there is a reasonable expectation that a service definition is more
abstractly representative of multiple services within the task group. In this
case, one would want to define checks of that service which concretely make HTTP
or gRPC requests to different underlying tasks. Such a model is not possible
with the coarse `proxy.expose` flag.
Instead, we now have the flag made available within the check definitions themselves.
By scoping the expose feature to each individual check, it is possible to have
some HTTP/gRPC checks which make use of the envoy exposed paths, as well as
some HTTP/gRPC checks which make use of an orthogonal port-mapping to do
checks on some other task (or even some other bound port of the same task)
within the task group.
Given this example,
group "server-group" {
network {
mode = "bridge"
port "forchecks" {
to = -1
}
}
service {
name = "myserver"
port = 2000
connect {
sidecar_service {
}
}
check {
name = "mycheck-myserver"
type = "http"
port = "forchecks"
interval = "3s"
timeout = "2s"
method = "GET"
path = "/classic/responder/health"
expose = true
}
}
}
Nomad will automatically inject (via job endpoint mutator) the
extrapolated expose path configuration, i.e.
expose {
  path {
    path            = "/classic/responder/health"
    protocol        = "http"
    local_path_port = 2000
    listener_port   = "forchecks"
  }
}
Documentation is coming in #7440 (needs updating, doing next)
Modifications to the `countdash` examples in https://github.com/hashicorp/demo-consul-101/pull/6
which will make the examples in the documentation actually runnable.
Will add some e2e tests based on the above when it becomes available.
2020-03-25 01:49:55 +00:00
|
|
|
Expose: check.Expose,
|
2019-06-24 15:29:26 +00:00
|
|
|
AddressMode: check.AddressMode,
|
|
|
|
Interval: check.Interval,
|
|
|
|
Timeout: check.Timeout,
|
|
|
|
InitialStatus: check.InitialStatus,
|
|
|
|
TLSSkipVerify: check.TLSSkipVerify,
|
|
|
|
Header: check.Header,
|
|
|
|
Method: check.Method,
|
2021-03-25 01:51:13 +00:00
|
|
|
Body: check.Body,
|
2019-06-24 15:29:26 +00:00
|
|
|
GRPCService: check.GRPCService,
|
|
|
|
GRPCUseTLS: check.GRPCUseTLS,
|
2019-08-19 13:17:38 +00:00
|
|
|
TaskName: check.TaskName,
|
2021-01-22 19:45:26 +00:00
|
|
|
OnUpdate: onUpdate,
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
|
|
|
if check.CheckRestart != nil {
|
|
|
|
out[i].Checks[j].CheckRestart = &structs.CheckRestart{
|
|
|
|
Limit: check.CheckRestart.Limit,
|
|
|
|
Grace: *check.CheckRestart.Grace,
|
|
|
|
IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
if s.Connect != nil {
|
|
|
|
out[i].Connect = ApiConsulConnectToStructs(s.Connect)
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
return out
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2019-08-09 19:18:53 +00:00
|
|
|
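// ApiConsulConnectToStructs converts a Consul Connect block, covering native
// mode, the sidecar service and task, and gateway configuration.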
func ApiConsulConnectToStructs(in *api.ConsulConnect) *structs.ConsulConnect {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2020-03-07 03:15:22 +00:00
|
|
|
return &structs.ConsulConnect{
|
|
|
|
Native: in.Native,
|
|
|
|
SidecarService: apiConnectSidecarServiceToStructs(in.SidecarService),
|
|
|
|
SidecarTask: apiConnectSidecarTaskToStructs(in.SidecarTask),
|
2020-07-28 20:12:08 +00:00
|
|
|
Gateway: apiConnectGatewayToStructs(in.Gateway),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
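// apiConnectGatewayToStructs converts a Consul gateway block, including the
// shared proxy configuration and the ingress and terminating config entries.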
func apiConnectGatewayToStructs(in *api.ConsulGateway) *structs.ConsulGateway {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulGateway{
|
2020-12-15 20:38:33 +00:00
|
|
|
Proxy: apiConnectGatewayProxyToStructs(in.Proxy),
|
|
|
|
Ingress: apiConnectIngressGatewayToStructs(in.Ingress),
|
|
|
|
Terminating: apiConnectTerminatingGatewayToStructs(in.Terminating),
|
2020-07-28 20:12:08 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
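// apiConnectGatewayProxyToStructs converts the gateway proxy configuration,
// rebuilding the Envoy bind address map and copying the opaque Config map so
// the structs copy does not alias the api values.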
func apiConnectGatewayProxyToStructs(in *api.ConsulGatewayProxy) *structs.ConsulGatewayProxy {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2021-01-22 14:18:17 +00:00
|
|
|
bindAddresses := make(map[string]*structs.ConsulGatewayBindAddress)
|
2020-07-28 20:12:08 +00:00
|
|
|
if in.EnvoyGatewayBindAddresses != nil {
|
|
|
|
for k, v := range in.EnvoyGatewayBindAddresses {
|
|
|
|
bindAddresses[k] = &structs.ConsulGatewayBindAddress{
|
|
|
|
Address: v.Address,
|
|
|
|
Port: v.Port,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulGatewayProxy{
|
|
|
|
ConnectTimeout: in.ConnectTimeout,
|
|
|
|
EnvoyGatewayBindTaggedAddresses: in.EnvoyGatewayBindTaggedAddresses,
|
|
|
|
EnvoyGatewayBindAddresses: bindAddresses,
|
|
|
|
EnvoyGatewayNoDefaultBind: in.EnvoyGatewayNoDefaultBind,
|
2020-12-15 20:38:33 +00:00
|
|
|
EnvoyDNSDiscoveryType: in.EnvoyDNSDiscoveryType,
|
2020-07-28 20:12:08 +00:00
|
|
|
Config: helper.CopyMapStringInterface(in.Config),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
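// apiConnectIngressGatewayToStructs converts an ingress gateway config entry,
// including its TLS settings and listeners.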
func apiConnectIngressGatewayToStructs(in *api.ConsulIngressConfigEntry) *structs.ConsulIngressConfigEntry {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulIngressConfigEntry{
|
|
|
|
TLS: apiConnectGatewayTLSConfig(in.TLS),
|
|
|
|
Listeners: apiConnectIngressListenersToStructs(in.Listeners),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectGatewayTLSConfig(in *api.ConsulGatewayTLSConfig) *structs.ConsulGatewayTLSConfig {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulGatewayTLSConfig{
|
|
|
|
Enabled: in.Enabled,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectIngressListenersToStructs(in []*api.ConsulIngressListener) []*structs.ConsulIngressListener {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
listeners := make([]*structs.ConsulIngressListener, len(in))
|
|
|
|
for i, listener := range in {
|
|
|
|
listeners[i] = apiConnectIngressListenerToStructs(listener)
|
|
|
|
}
|
|
|
|
return listeners
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectIngressListenerToStructs(in *api.ConsulIngressListener) *structs.ConsulIngressListener {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulIngressListener{
|
|
|
|
Port: in.Port,
|
|
|
|
Protocol: in.Protocol,
|
|
|
|
Services: apiConnectIngressServicesToStructs(in.Services),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectIngressServicesToStructs(in []*api.ConsulIngressService) []*structs.ConsulIngressService {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
services := make([]*structs.ConsulIngressService, len(in))
|
|
|
|
for i, service := range in {
|
|
|
|
services[i] = apiConnectIngressServiceToStructs(service)
|
|
|
|
}
|
|
|
|
return services
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectIngressServiceToStructs(in *api.ConsulIngressService) *structs.ConsulIngressService {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulIngressService{
|
|
|
|
Name: in.Name,
|
|
|
|
Hosts: helper.CopySliceString(in.Hosts),
|
2020-03-07 03:15:22 +00:00
|
|
|
}
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2020-12-15 20:38:33 +00:00
|
|
|
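// apiConnectTerminatingGatewayToStructs converts a terminating gateway config
// entry and the services linked to it.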
func apiConnectTerminatingGatewayToStructs(in *api.ConsulTerminatingConfigEntry) *structs.ConsulTerminatingConfigEntry {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulTerminatingConfigEntry{
|
|
|
|
Services: apiConnectTerminatingServicesToStructs(in.Services),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectTerminatingServicesToStructs(in []*api.ConsulLinkedService) []*structs.ConsulLinkedService {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
services := make([]*structs.ConsulLinkedService, len(in))
|
|
|
|
for i, service := range in {
|
|
|
|
services[i] = apiConnectTerminatingServiceToStructs(service)
|
|
|
|
}
|
|
|
|
return services
|
|
|
|
}
|
|
|
|
|
|
|
|
func apiConnectTerminatingServiceToStructs(in *api.ConsulLinkedService) *structs.ConsulLinkedService {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.ConsulLinkedService{
|
|
|
|
Name: in.Name,
|
|
|
|
CAFile: in.CAFile,
|
|
|
|
CertFile: in.CertFile,
|
|
|
|
KeyFile: in.KeyFile,
|
|
|
|
SNI: in.SNI,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
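// apiConnectSidecarServiceToStructs converts the sidecar_service block,
// copying the tags slice and converting the nested proxy configuration.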
func apiConnectSidecarServiceToStructs(in *api.ConsulSidecarService) *structs.ConsulSidecarService {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &structs.ConsulSidecarService{
|
|
|
|
Port: in.Port,
|
|
|
|
Tags: helper.CopySliceString(in.Tags),
|
|
|
|
Proxy: apiConnectSidecarServiceProxyToStructs(in.Proxy),
|
2019-08-09 19:18:53 +00:00
|
|
|
}
|
2020-03-07 03:15:22 +00:00
|
|
|
}
|
2019-08-09 19:18:53 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
func apiConnectSidecarServiceProxyToStructs(in *api.ConsulProxy) *structs.ConsulProxy {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &structs.ConsulProxy{
|
|
|
|
LocalServiceAddress: in.LocalServiceAddress,
|
|
|
|
LocalServicePort: in.LocalServicePort,
|
|
|
|
Upstreams: apiUpstreamsToStructs(in.Upstreams),
|
|
|
|
Expose: apiConsulExposeConfigToStructs(in.ExposeConfig),
|
2020-07-28 20:12:08 +00:00
|
|
|
Config: helper.CopyMapStringInterface(in.Config),
|
2020-03-07 03:15:22 +00:00
|
|
|
}
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
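// apiUpstreamsToStructs converts the sidecar proxy's upstream list, returning
// nil when no upstreams are configured.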
func apiUpstreamsToStructs(in []*api.ConsulUpstream) []structs.ConsulUpstream {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
upstreams := make([]structs.ConsulUpstream, len(in))
|
|
|
|
for i, upstream := range in {
|
|
|
|
upstreams[i] = structs.ConsulUpstream{
|
2021-02-23 15:49:18 +00:00
|
|
|
DestinationName: upstream.DestinationName,
|
|
|
|
LocalBindPort: upstream.LocalBindPort,
|
|
|
|
Datacenter: upstream.Datacenter,
|
|
|
|
LocalBindAddress: upstream.LocalBindAddress,
|
2019-07-30 22:40:45 +00:00
|
|
|
}
|
2020-03-07 03:15:22 +00:00
|
|
|
}
|
|
|
|
return upstreams
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
func apiConsulExposeConfigToStructs(in *api.ConsulExposeConfig) *structs.ConsulExposeConfig {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &structs.ConsulExposeConfig{
|
2020-03-12 14:37:31 +00:00
|
|
|
Paths: apiConsulExposePathsToStructs(in.Path),
|
2020-03-07 03:15:22 +00:00
|
|
|
}
|
|
|
|
}
|
2019-08-09 19:18:53 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
func apiConsulExposePathsToStructs(in []*api.ConsulExposePath) []structs.ConsulExposePath {
|
|
|
|
if len(in) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
paths := make([]structs.ConsulExposePath, len(in))
|
|
|
|
for i, path := range in {
|
|
|
|
paths[i] = structs.ConsulExposePath{
|
|
|
|
Path: path.Path,
|
|
|
|
Protocol: path.Protocol,
|
|
|
|
LocalPathPort: path.LocalPathPort,
|
|
|
|
ListenerPort: path.ListenerPort,
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
2019-08-09 19:18:53 +00:00
|
|
|
}
|
2020-03-07 03:15:22 +00:00
|
|
|
return paths
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
func apiConnectSidecarTaskToStructs(in *api.SidecarTask) *structs.SidecarTask {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &structs.SidecarTask{
|
|
|
|
Name: in.Name,
|
|
|
|
Driver: in.Driver,
|
|
|
|
User: in.User,
|
|
|
|
Config: in.Config,
|
|
|
|
Env: in.Env,
|
|
|
|
Resources: ApiResourcesToStructs(in.Resources),
|
|
|
|
Meta: in.Meta,
|
|
|
|
ShutdownDelay: in.ShutdownDelay,
|
|
|
|
KillSignal: in.KillSignal,
|
|
|
|
KillTimeout: in.KillTimeout,
|
|
|
|
LogConfig: apiLogConfigToStructs(in.LogConfig),
|
|
|
|
}
|
|
|
|
}
|
2019-08-20 05:22:46 +00:00
|
|
|
|
2021-03-16 18:22:21 +00:00
|
|
|
func apiConsulToStructs(in *api.Consul) *structs.Consul {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return &structs.Consul{
|
|
|
|
Namespace: in.Namespace,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
func apiLogConfigToStructs(in *api.LogConfig) *structs.LogConfig {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
2020-03-07 03:15:22 +00:00
|
|
|
return &structs.LogConfig{
|
|
|
|
MaxFiles: dereferenceInt(in.MaxFiles),
|
|
|
|
MaxFileSizeMB: dereferenceInt(in.MaxFileSizeMB),
|
|
|
|
}
|
|
|
|
}
|
2019-06-24 15:29:26 +00:00
|
|
|
|
2020-03-07 03:15:22 +00:00
|
|
|
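// dereferenceInt safely unwraps an optional *int from the api package,
// treating nil as zero.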
func dereferenceInt(in *int) int {
|
|
|
|
if in == nil {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
return *in
|
2019-06-24 15:29:26 +00:00
|
|
|
}
|
|
|
|
|
2018-10-11 21:05:19 +00:00
|
|
|
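// ApiConstraintsToStructs converts a slice of api.Constraint to the structs
// form, returning nil for nil input.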
func ApiConstraintsToStructs(in []*api.Constraint) []*structs.Constraint {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
out := make([]*structs.Constraint, len(in))
|
|
|
|
for i, ac := range in {
|
|
|
|
out[i] = ApiConstraintToStructs(ac)
|
|
|
|
}
|
|
|
|
|
|
|
|
return out
|
|
|
|
}
|
|
|
|
|
|
|
|
func ApiConstraintToStructs(in *api.Constraint) *structs.Constraint {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return &structs.Constraint{
|
|
|
|
LTarget: in.LTarget,
|
|
|
|
RTarget: in.RTarget,
|
|
|
|
Operand: in.Operand,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func ApiAffinitiesToStructs(in []*api.Affinity) []*structs.Affinity {
|
|
|
|
if in == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
out := make([]*structs.Affinity, len(in))
|
|
|
|
for i, ac := range in {
|
|
|
|
out[i] = ApiAffinityToStructs(ac)
|
|
|
|
}
|
|
|
|
|
|
|
|
return out
|
2017-02-13 23:18:17 +00:00
|
|
|
}
|
2018-07-16 13:30:58 +00:00
|
|
|
|
2018-07-18 19:16:02 +00:00
|
|
|
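// ApiAffinityToStructs converts a single affinity. The Weight pointer is
// assumed to have been canonicalized to non-nil by the api package before
// this is called.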
func ApiAffinityToStructs(a1 *api.Affinity) *structs.Affinity {
|
|
|
|
return &structs.Affinity{
|
|
|
|
LTarget: a1.LTarget,
|
|
|
|
Operand: a1.Operand,
|
|
|
|
RTarget: a1.RTarget,
|
2019-01-30 20:20:38 +00:00
|
|
|
Weight: *a1.Weight,
|
2018-07-18 19:16:02 +00:00
|
|
|
}
|
2018-07-16 13:30:58 +00:00
|
|
|
}
|
2018-07-18 15:53:03 +00:00
|
|
|
|
|
|
|
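// ApiSpreadToStructs converts a spread block and its targets; as with
// affinities, Weight is assumed to be non-nil after api canonicalization.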
func ApiSpreadToStructs(a1 *api.Spread) *structs.Spread {
|
|
|
|
ret := &structs.Spread{}
|
|
|
|
ret.Attribute = a1.Attribute
|
2019-01-11 15:48:12 +00:00
|
|
|
ret.Weight = *a1.Weight
|
2018-07-18 15:53:03 +00:00
|
|
|
if a1.SpreadTarget != nil {
|
|
|
|
ret.SpreadTarget = make([]*structs.SpreadTarget, len(a1.SpreadTarget))
|
|
|
|
for i, st := range a1.SpreadTarget {
|
|
|
|
ret.SpreadTarget[i] = &structs.SpreadTarget{
|
|
|
|
Value: st.Value,
|
|
|
|
Percent: st.Percent,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ret
|
|
|
|
}
|