open-nomad/command/agent/job_endpoint.go

package agent

import (
	"fmt"
	"net/http"
	"regexp"
	"strconv"
	"strings"

	"github.com/golang/snappy"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/jobspec"
	"github.com/hashicorp/nomad/nomad/structs"
)
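
// JobsRequest is the HTTP handler for the jobs collection endpoint:
// GET lists jobs, while PUT and POST register or update a job.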
func (s *HTTPServer) JobsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
switch req.Method {
case "GET":
return s.jobListRequest(resp, req)
case "PUT", "POST":
2015-09-06 18:47:52 +00:00
return s.jobUpdate(resp, req, "")
2015-09-06 01:00:30 +00:00
default:
return nil, CodedError(405, ErrInvalidMethod)
2015-09-06 00:06:05 +00:00
}
2015-09-06 01:00:30 +00:00
}
2015-09-06 00:06:05 +00:00
2015-09-06 01:00:30 +00:00
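
// jobListRequest lists jobs via the Job.List RPC, normalizing a nil result
// to an empty slice.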
func (s *HTTPServer) jobListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	args := structs.JobListRequest{}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobListResponse
	if err := s.agent.RPC("Job.List", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Jobs == nil {
		out.Jobs = make([]*structs.JobListStub, 0)
	}
	return out.Jobs, nil
}
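
// JobSpecificRequest routes requests for a single job to the appropriate
// sub-handler based on the URL suffix, falling back to basic CRUD on the
// job itself.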
func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	path := strings.TrimPrefix(req.URL.Path, "/v1/job/")
	switch {
	case strings.HasSuffix(path, "/evaluate"):
		jobName := strings.TrimSuffix(path, "/evaluate")
		return s.jobForceEvaluate(resp, req, jobName)
	case strings.HasSuffix(path, "/allocations"):
		jobName := strings.TrimSuffix(path, "/allocations")
		return s.jobAllocations(resp, req, jobName)
	case strings.HasSuffix(path, "/evaluations"):
		jobName := strings.TrimSuffix(path, "/evaluations")
		return s.jobEvaluations(resp, req, jobName)
	case strings.HasSuffix(path, "/periodic/force"):
		jobName := strings.TrimSuffix(path, "/periodic/force")
		return s.periodicForceRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/plan"):
		jobName := strings.TrimSuffix(path, "/plan")
		return s.jobPlan(resp, req, jobName)
	case strings.HasSuffix(path, "/summary"):
		jobName := strings.TrimSuffix(path, "/summary")
		return s.jobSummaryRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/dispatch"):
		jobName := strings.TrimSuffix(path, "/dispatch")
		return s.jobDispatchRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/versions"):
		jobName := strings.TrimSuffix(path, "/versions")
		return s.jobVersions(resp, req, jobName)
	case strings.HasSuffix(path, "/revert"):
		jobName := strings.TrimSuffix(path, "/revert")
		return s.jobRevert(resp, req, jobName)
	case strings.HasSuffix(path, "/deployments"):
		jobName := strings.TrimSuffix(path, "/deployments")
		return s.jobDeployments(resp, req, jobName)
	case strings.HasSuffix(path, "/deployment"):
		jobName := strings.TrimSuffix(path, "/deployment")
		return s.jobLatestDeployment(resp, req, jobName)
	case strings.HasSuffix(path, "/stable"):
		jobName := strings.TrimSuffix(path, "/stable")
		return s.jobStable(resp, req, jobName)
	case strings.HasSuffix(path, "/scale"):
		return s.jobScale(resp, req, path)
	default:
		return s.jobCRUD(resp, req, path)
	}
}
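
// jobForceEvaluate forces a new evaluation of the given job via the
// Job.Evaluate RPC.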
func (s *HTTPServer) jobForceEvaluate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var args structs.JobEvaluateRequest

	// TODO(preetha): remove in 0.9
	// COMPAT: For backwards compatibility allow using this endpoint without a payload
	if req.ContentLength == 0 {
		args = structs.JobEvaluateRequest{
			JobID: jobName,
		}
	} else {
		if err := decodeBody(req, &args); err != nil {
			return nil, CodedError(400, err.Error())
		}
		if args.JobID == "" {
			return nil, CodedError(400, "Job ID must be specified")
		}
		if jobName != "" && args.JobID != jobName {
			return nil, CodedError(400, "Job ID does not match the job name in the URL")
		}
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Evaluate", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
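
// jobPlan invokes the scheduler in dry-run mode via the Job.Plan RPC and
// returns what would change if the submitted job were registered.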
func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var args api.JobPlanRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}
	if args.Job.ID == nil {
		return nil, CodedError(400, "Job must have a valid ID")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match")
	}

	// Region in http request query param takes precedence over region in job hcl config
	if args.WriteRequest.Region != "" {
		args.Job.Region = helper.StringToPtr(args.WriteRequest.Region)
	}
	// If 'global' region is specified or if no region is given,
	// default to region of the node you're submitting to
	if args.Job.Region == nil || *args.Job.Region == "" || *args.Job.Region == api.GlobalRegion {
		args.Job.Region = &s.agent.config.Region
	}

	sJob := ApiJobToStructJob(args.Job)

	planReq := structs.JobPlanRequest{
		Job:            sJob,
		Diff:           args.Diff,
		PolicyOverride: args.PolicyOverride,
		WriteRequest: structs.WriteRequest{
			Region: sJob.Region,
		},
	}
	// parseWriteRequest overrides Namespace, Region and AuthToken
	// based on values from the original http request
	s.parseWriteRequest(req, &planReq.WriteRequest)
	planReq.Namespace = sJob.Namespace

	var out structs.JobPlanResponse
	if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
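
// ValidateJobRequest validates a job definition server-side via the
// Job.Validate RPC without registering it.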
func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure request method is POST or PUT
	if !(req.Method == "POST" || req.Method == "PUT") {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var validateRequest api.JobValidateRequest
	if err := decodeBody(req, &validateRequest); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if validateRequest.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}

	job := ApiJobToStructJob(validateRequest.Job)

	args := structs.JobValidateRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region: validateRequest.Region,
		},
	}
	s.parseWriteRequest(req, &args.WriteRequest)
	args.Namespace = job.Namespace

	var out structs.JobValidateResponse
	if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
		return nil, err
	}
	return out, nil
}
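
// periodicForceRequest forces a new launch of a periodic job via the
// Periodic.Force RPC.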
func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.PeriodicForceRequest{
		JobID: jobName,
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.PeriodicForceResponse
	if err := s.agent.RPC("Periodic.Force", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
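
// jobAllocations lists the allocations belonging to a job; the "all" query
// parameter includes allocations from prior job versions.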
func (s *HTTPServer) jobAllocations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	allAllocs, _ := strconv.ParseBool(req.URL.Query().Get("all"))

	args := structs.JobSpecificRequest{
		JobID: jobName,
		All:   allAllocs,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobAllocationsResponse
	if err := s.agent.RPC("Job.Allocations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Allocations == nil {
		out.Allocations = make([]*structs.AllocListStub, 0)
	}
	for _, alloc := range out.Allocations {
		alloc.SetEventDisplayMessages()
	}
	return out.Allocations, nil
}
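
// jobEvaluations lists the evaluations belonging to a job.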
func (s *HTTPServer) jobEvaluations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobEvaluationsResponse
	if err := s.agent.RPC("Job.Evaluations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Evaluations == nil {
		out.Evaluations = make([]*structs.Evaluation, 0)
	}
	return out.Evaluations, nil
}
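
// jobDeployments lists the deployments belonging to a job; the "all" query
// parameter includes deployments for prior job versions.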
func (s *HTTPServer) jobDeployments(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
all, _ := strconv.ParseBool(req.URL.Query().Get("all"))
	args := structs.JobSpecificRequest{
		JobID: jobName,
		All:   all,
	}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.DeploymentListResponse
if err := s.agent.RPC("Job.Deployments", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Deployments == nil {
out.Deployments = make([]*structs.Deployment, 0)
}
return out.Deployments, nil
}
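
// jobLatestDeployment returns the most recent deployment for a job.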
func (s *HTTPServer) jobLatestDeployment(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.JobSpecificRequest{
JobID: jobName,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SingleDeploymentResponse
if err := s.agent.RPC("Job.LatestDeployment", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
return out.Deployment, nil
}
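
// jobCRUD dispatches reads, registrations, and deregistrations for a single
// job based on the HTTP method.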
func (s *HTTPServer) jobCRUD(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
switch req.Method {
case "GET":
return s.jobQuery(resp, req, jobName)
case "PUT", "POST":
return s.jobUpdate(resp, req, jobName)
case "DELETE":
return s.jobDelete(resp, req, jobName)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
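
// jobQuery fetches a single job and decodes its snappy-compressed dispatch
// payload, if present, before returning it.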
func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.SingleJobResponse
	if err := s.agent.RPC("Job.GetJob", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Job == nil {
		return nil, CodedError(404, "job not found")
	}

	// Decode the payload if there is any
	job := out.Job
	if len(job.Payload) != 0 {
		decoded, err := snappy.Decode(nil, out.Job.Payload)
		if err != nil {
			return nil, err
		}
		job = job.Copy()
		job.Payload = decoded
	}

	return job, nil
}
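
// jobUpdate registers or updates a job via the Job.Register RPC after
// resolving the effective region for the job.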
func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	var args api.JobRegisterRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}
	if args.Job.ID == nil {
		return nil, CodedError(400, "Job ID hasn't been provided")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match name")
	}

	// Region in http request query param takes precedence over region in job hcl config
	if args.WriteRequest.Region != "" {
		args.Job.Region = helper.StringToPtr(args.WriteRequest.Region)
	}
	// If 'global' region is specified or if no region is given,
	// default to region of the node you're submitting to
	if args.Job.Region == nil || *args.Job.Region == "" || *args.Job.Region == api.GlobalRegion {
		args.Job.Region = &s.agent.config.Region
	}

	sJob := ApiJobToStructJob(args.Job)

	regReq := structs.JobRegisterRequest{
		Job:            sJob,
		EnforceIndex:   args.EnforceIndex,
		JobModifyIndex: args.JobModifyIndex,
		PolicyOverride: args.PolicyOverride,
		WriteRequest: structs.WriteRequest{
			Region:    sJob.Region,
			AuthToken: args.WriteRequest.SecretID,
		},
	}
	// parseWriteRequest overrides Namespace, Region and AuthToken
	// based on values from the original http request
	s.parseWriteRequest(req, &regReq.WriteRequest)
	regReq.Namespace = sJob.Namespace

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Register", &regReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
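
// jobDelete deregisters a job; the "purge" query parameter removes the job
// from the state store entirely.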
func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	purgeStr := req.URL.Query().Get("purge")
	var purgeBool bool
	if purgeStr != "" {
		var err error
		purgeBool, err = strconv.ParseBool(purgeStr)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "purge", purgeStr, err)
		}
	}

	args := structs.JobDeregisterRequest{
		JobID: jobName,
		Purge: purgeBool,
	}
	s.parseWriteRequest(req, &args.WriteRequest)

	var out structs.JobDeregisterResponse
	if err := s.agent.RPC("Job.Deregister", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
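
// jobScale changes the count of a task group via the Job.Scale RPC. The
// path carries the job ID followed by a "/<group>/scale" suffix naming the
// target group.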
func (s *HTTPServer) jobScale(resp http.ResponseWriter, req *http.Request,
jobAndTarget string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args api.ScalingRequest
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
}
if args.JobID == "" {
return nil, CodedError(400, "Job ID must be specified")
}
if !strings.HasPrefix(jobAndTarget, args.JobID) {
return nil, CodedError(400, "Job ID does not match")
}
subTarget := strings.TrimPrefix(jobAndTarget, args.JobID)
	// Extract the task group name from the trailing "/<group>/scale" segment;
	// a capture group is needed so we get the bare name rather than the full
	// match with its surrounding slashes and suffix.
	groupScale := regexp.MustCompile(`^/([^/]+)/scale$`)
	match := groupScale.FindStringSubmatch(subTarget)
	if match == nil {
		return nil, CodedError(400, "Invalid scaling target")
	}
	groupName := match[1]
scaleReq := structs.JobScaleRequest{
JobID: args.JobID,
GroupName: groupName,
Value: args.Value,
PolicyOverride: args.PolicyOverride,
Reason: args.Reason,
}
// parseWriteRequest overrides Namespace, Region and AuthToken
// based on values from the original http request
s.parseWriteRequest(req, &scaleReq.WriteRequest)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Scale", &scaleReq, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
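
// jobVersions lists the tracked versions of a job; the "diffs" query
// parameter includes the diffs between successive versions.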
func (s *HTTPServer) jobVersions(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
diffsStr := req.URL.Query().Get("diffs")
var diffsBool bool
if diffsStr != "" {
var err error
diffsBool, err = strconv.ParseBool(diffsStr)
if err != nil {
return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "diffs", diffsStr, err)
}
}
	args := structs.JobVersionsRequest{
		JobID: jobName,
		Diffs: diffsBool,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobVersionsResponse
	if err := s.agent.RPC("Job.GetJobVersions", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if len(out.Versions) == 0 {
		return nil, CodedError(404, "job versions not found")
	}
	return out, nil
}
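
// jobRevert reverts a job to a prior version via the Job.Revert RPC.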
func (s *HTTPServer) jobRevert(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
var revertRequest structs.JobRevertRequest
if err := decodeBody(req, &revertRequest); err != nil {
return nil, CodedError(400, err.Error())
}
if revertRequest.JobID == "" {
return nil, CodedError(400, "JobID must be specified")
}
if revertRequest.JobID != jobName {
return nil, CodedError(400, "Job ID does not match")
}
s.parseWriteRequest(req, &revertRequest.WriteRequest)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Revert", &revertRequest, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
return out, nil
}
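
// jobStable marks a job version as stable or unstable via the Job.Stable RPC.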
func (s *HTTPServer) jobStable(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
var stableRequest structs.JobStabilityRequest
if err := decodeBody(req, &stableRequest); err != nil {
return nil, CodedError(400, err.Error())
}
if stableRequest.JobID == "" {
return nil, CodedError(400, "JobID must be specified")
}
if stableRequest.JobID != jobName {
return nil, CodedError(400, "Job ID does not match")
}
s.parseWriteRequest(req, &stableRequest.WriteRequest)
var out structs.JobStabilityResponse
if err := s.agent.RPC("Job.Stable", &stableRequest, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
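
// jobSummaryRequest returns the summary of a job's allocation states.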
func (s *HTTPServer) jobSummaryRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
args := structs.JobSummaryRequest{
JobID: name,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.JobSummaryResponse
if err := s.agent.RPC("Job.Summary", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.JobSummary == nil {
return nil, CodedError(404, "job not found")
}
setIndex(resp, out.Index)
return out.JobSummary, nil
}
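
// jobDispatchRequest dispatches a new instance of a parameterized job via
// the Job.Dispatch RPC.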
func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.JobDispatchRequest{}
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
}
if args.JobID != "" && args.JobID != name {
return nil, CodedError(400, "Job ID does not match")
}
if args.JobID == "" {
args.JobID = name
}
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.JobDispatchResponse
if err := s.agent.RPC("Job.Dispatch", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
// JobsParseRequest parses an HCL jobspec and returns an api.Job.
func (s *HTTPServer) JobsParseRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != http.MethodPut && req.Method != http.MethodPost {
return nil, CodedError(405, ErrInvalidMethod)
}
args := &api.JobsParseRequest{}
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
}
if args.JobHCL == "" {
return nil, CodedError(400, "Job spec is empty")
}
jobfile := strings.NewReader(args.JobHCL)
jobStruct, err := jobspec.Parse(jobfile)
if err != nil {
return nil, CodedError(400, err.Error())
}
if args.Canonicalize {
jobStruct.Canonicalize()
}
return jobStruct, nil
}
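
// ApiJobToStructJob canonicalizes an api.Job and converts it into the
// structs.Job representation used by the server.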
func ApiJobToStructJob(job *api.Job) *structs.Job {
	job.Canonicalize()

	j := &structs.Job{
		Stop:        *job.Stop,
		Region:      *job.Region,
		Namespace:   *job.Namespace,
		ID:          *job.ID,
		ParentID:    *job.ParentID,
		Name:        *job.Name,
		Type:        *job.Type,
		Priority:    *job.Priority,
		AllAtOnce:   *job.AllAtOnce,
		Datacenters: job.Datacenters,
		Payload:     job.Payload,
		Meta:        job.Meta,
		ConsulToken: *job.ConsulToken,
		VaultToken:  *job.VaultToken,
		Constraints: ApiConstraintsToStructs(job.Constraints),
		Affinities:  ApiAffinitiesToStructs(job.Affinities),
	}
	// Update has been pushed into the task groups. Stagger and MaxParallel are
	// preserved at the job level, but all other values are discarded. The
	// job.Update API value is merged into TaskGroups already in api.Canonicalize.
	if job.Update != nil && job.Update.MaxParallel != nil && *job.Update.MaxParallel > 0 {
		j.Update = structs.UpdateStrategy{}

		if job.Update.Stagger != nil {
			j.Update.Stagger = *job.Update.Stagger
		}
		if job.Update.MaxParallel != nil {
			j.Update.MaxParallel = *job.Update.MaxParallel
		}
	}
if l := len(job.Spreads); l != 0 {
j.Spreads = make([]*structs.Spread, l)
for i, apiSpread := range job.Spreads {
j.Spreads[i] = ApiSpreadToStructs(apiSpread)
}
}
	if job.Periodic != nil {
		j.Periodic = &structs.PeriodicConfig{
			Enabled:         *job.Periodic.Enabled,
			SpecType:        *job.Periodic.SpecType,
			ProhibitOverlap: *job.Periodic.ProhibitOverlap,
			TimeZone:        *job.Periodic.TimeZone,
		}

		if job.Periodic.Spec != nil {
			j.Periodic.Spec = *job.Periodic.Spec
		}
	}
if job.ParameterizedJob != nil {
j.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: job.ParameterizedJob.Payload,
MetaRequired: job.ParameterizedJob.MetaRequired,
MetaOptional: job.ParameterizedJob.MetaOptional,
}
}
	if l := len(job.TaskGroups); l != 0 {
		j.TaskGroups = make([]*structs.TaskGroup, l)
		for i, taskGroup := range job.TaskGroups {
			tg := &structs.TaskGroup{}
			ApiTgToStructsTG(j, taskGroup, tg)
			j.TaskGroups[i] = tg
		}
	}

	return j
}
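
// ApiTgToStructsTG populates a structs.TaskGroup from its api.TaskGroup
// counterpart.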
func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
	tg.Name = *taskGroup.Name
	tg.Count = *taskGroup.Count
	tg.Meta = taskGroup.Meta
	tg.Constraints = ApiConstraintsToStructs(taskGroup.Constraints)
	tg.Affinities = ApiAffinitiesToStructs(taskGroup.Affinities)
	tg.Networks = ApiNetworkResourceToStructs(taskGroup.Networks)
	tg.Services = ApiServicesToStructs(taskGroup.Services)

	tg.RestartPolicy = &structs.RestartPolicy{
		Attempts: *taskGroup.RestartPolicy.Attempts,
		Interval: *taskGroup.RestartPolicy.Interval,
		Delay:    *taskGroup.RestartPolicy.Delay,
		Mode:     *taskGroup.RestartPolicy.Mode,
	}
if taskGroup.ShutdownDelay != nil {
tg.ShutdownDelay = taskGroup.ShutdownDelay
}
if taskGroup.ReschedulePolicy != nil {
tg.ReschedulePolicy = &structs.ReschedulePolicy{
Attempts: *taskGroup.ReschedulePolicy.Attempts,
Interval: *taskGroup.ReschedulePolicy.Interval,
Delay: *taskGroup.ReschedulePolicy.Delay,
DelayFunction: *taskGroup.ReschedulePolicy.DelayFunction,
MaxDelay: *taskGroup.ReschedulePolicy.MaxDelay,
Unlimited: *taskGroup.ReschedulePolicy.Unlimited,
}
}
if taskGroup.Migrate != nil {
tg.Migrate = &structs.MigrateStrategy{
MaxParallel: *taskGroup.Migrate.MaxParallel,
HealthCheck: *taskGroup.Migrate.HealthCheck,
MinHealthyTime: *taskGroup.Migrate.MinHealthyTime,
HealthyDeadline: *taskGroup.Migrate.HealthyDeadline,
}
}
if taskGroup.Scaling != nil {
tg.Scaling = ApiScalingPolicyToStructs(job, taskGroup.Scaling).TargetTaskGroup(job, tg)
}
tg.EphemeralDisk = &structs.EphemeralDisk{
Sticky: *taskGroup.EphemeralDisk.Sticky,
SizeMB: *taskGroup.EphemeralDisk.SizeMB,
Migrate: *taskGroup.EphemeralDisk.Migrate,
}
if l := len(taskGroup.Spreads); l != 0 {
tg.Spreads = make([]*structs.Spread, l)
for k, spread := range taskGroup.Spreads {
tg.Spreads[k] = ApiSpreadToStructs(spread)
}
}
	if l := len(taskGroup.Volumes); l != 0 {
		tg.Volumes = make(map[string]*structs.VolumeRequest, l)
		for k, v := range taskGroup.Volumes {
			if v.Type != structs.VolumeTypeHost && v.Type != structs.VolumeTypeCSI {
				// Ignore volume types we don't understand, since we currently
				// have no way to return an error here.
				continue
			}

			vol := &structs.VolumeRequest{
				Name:     v.Name,
				Type:     v.Type,
				ReadOnly: v.ReadOnly,
				Source:   v.Source,
			}

			if v.MountOptions != nil {
				vol.MountOptions = &structs.CSIMountOptions{
					FSType:     v.MountOptions.FSType,
					MountFlags: v.MountOptions.MountFlags,
				}
			}

			tg.Volumes[k] = vol
		}
	}
	if taskGroup.Update != nil {
		tg.Update = &structs.UpdateStrategy{
			Stagger:          *taskGroup.Update.Stagger,
			MaxParallel:      *taskGroup.Update.MaxParallel,
			HealthCheck:      *taskGroup.Update.HealthCheck,
			MinHealthyTime:   *taskGroup.Update.MinHealthyTime,
			HealthyDeadline:  *taskGroup.Update.HealthyDeadline,
			ProgressDeadline: *taskGroup.Update.ProgressDeadline,
			Canary:           *taskGroup.Update.Canary,
		}

		// boolPtr fields may be nil, others will have pointers to default values via Canonicalize
		if taskGroup.Update.AutoRevert != nil {
			tg.Update.AutoRevert = *taskGroup.Update.AutoRevert
		}

		if taskGroup.Update.AutoPromote != nil {
			tg.Update.AutoPromote = *taskGroup.Update.AutoPromote
		}
	}
	if l := len(taskGroup.Tasks); l != 0 {
		tg.Tasks = make([]*structs.Task, l)
		for i, task := range taskGroup.Tasks {
			t := &structs.Task{}
			ApiTaskToStructsTask(task, t)
			tg.Tasks[i] = t
		}
	}
}
// ApiTaskToStructsTask is a copy and type conversion between the API
// representation of a task and the struct representation of a task.
func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
	structsTask.Name = apiTask.Name
	structsTask.Driver = apiTask.Driver
	structsTask.User = apiTask.User
	structsTask.Leader = apiTask.Leader
	structsTask.Config = apiTask.Config
	structsTask.Env = apiTask.Env
	structsTask.Meta = apiTask.Meta
	structsTask.KillTimeout = *apiTask.KillTimeout
	structsTask.ShutdownDelay = apiTask.ShutdownDelay
	structsTask.KillSignal = apiTask.KillSignal
	structsTask.Kind = structs.TaskKind(apiTask.Kind)
	structsTask.Constraints = ApiConstraintsToStructs(apiTask.Constraints)
	structsTask.Affinities = ApiAffinitiesToStructs(apiTask.Affinities)
	structsTask.CSIPluginConfig = ApiCSIPluginConfigToStructsCSIPluginConfig(apiTask.CSIPluginConfig)
if l := len(apiTask.VolumeMounts); l != 0 {
structsTask.VolumeMounts = make([]*structs.VolumeMount, l)
for i, mount := range apiTask.VolumeMounts {
structsTask.VolumeMounts[i] = &structs.VolumeMount{
Volume: *mount.Volume,
Destination: *mount.Destination,
ReadOnly: *mount.ReadOnly,
PropagationMode: *mount.PropagationMode,
}
}
}
	if l := len(apiTask.Services); l != 0 {
		structsTask.Services = make([]*structs.Service, l)
		for i, service := range apiTask.Services {
			structsTask.Services[i] = &structs.Service{
Name: service.Name,
PortLabel: service.PortLabel,
Tags: service.Tags,
CanaryTags: service.CanaryTags,
EnableTagOverride: service.EnableTagOverride,
AddressMode: service.AddressMode,
Meta: helper.CopyMapStringString(service.Meta),
CanaryMeta: helper.CopyMapStringString(service.CanaryMeta),
			}

			if l := len(service.Checks); l != 0 {
				structsTask.Services[i].Checks = make([]*structs.ServiceCheck, l)
				for j, check := range service.Checks {
					structsTask.Services[i].Checks[j] = &structs.ServiceCheck{
Name: check.Name,
Type: check.Type,
Command: check.Command,
Args: check.Args,
Path: check.Path,
Protocol: check.Protocol,
PortLabel: check.PortLabel,
AddressMode: check.AddressMode,
Interval: check.Interval,
Timeout: check.Timeout,
InitialStatus: check.InitialStatus,
TLSSkipVerify: check.TLSSkipVerify,
Header: check.Header,
Method: check.Method,
GRPCService: check.GRPCService,
GRPCUseTLS: check.GRPCUseTLS,
}
if check.CheckRestart != nil {
structsTask.Services[i].Checks[j].CheckRestart = &structs.CheckRestart{
Limit: check.CheckRestart.Limit,
Grace: *check.CheckRestart.Grace,
IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
}
}
}
}
}
}
structsTask.Resources = ApiResourcesToStructs(apiTask.Resources)
	structsTask.LogConfig = &structs.LogConfig{
		MaxFiles:      *apiTask.LogConfig.MaxFiles,
		MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
	}

	if l := len(apiTask.Artifacts); l != 0 {
		structsTask.Artifacts = make([]*structs.TaskArtifact, l)
		for k, ta := range apiTask.Artifacts {
			structsTask.Artifacts[k] = &structs.TaskArtifact{
				GetterSource:  *ta.GetterSource,
				GetterOptions: ta.GetterOptions,
				GetterMode:    *ta.GetterMode,
				RelativeDest:  *ta.RelativeDest,
			}
		}
	}
	if apiTask.Vault != nil {
		structsTask.Vault = &structs.Vault{
			Policies:     apiTask.Vault.Policies,
			Env:          *apiTask.Vault.Env,
			ChangeMode:   *apiTask.Vault.ChangeMode,
			ChangeSignal: *apiTask.Vault.ChangeSignal,
		}
	}

	if l := len(apiTask.Templates); l != 0 {
		structsTask.Templates = make([]*structs.Template, l)
		for i, template := range apiTask.Templates {
			structsTask.Templates[i] = &structs.Template{
				SourcePath:   *template.SourcePath,
				DestPath:     *template.DestPath,
				EmbeddedTmpl: *template.EmbeddedTmpl,
				ChangeMode:   *template.ChangeMode,
				ChangeSignal: *template.ChangeSignal,
				Splay:        *template.Splay,
				Perms:        *template.Perms,
				LeftDelim:    *template.LeftDelim,
				RightDelim:   *template.RightDelim,
				Envvars:      *template.Envvars,
				VaultGrace:   *template.VaultGrace,
			}
		}
	}
if apiTask.DispatchPayload != nil {
structsTask.DispatchPayload = &structs.DispatchPayloadConfig{
File: apiTask.DispatchPayload.File,
}
}
if apiTask.Lifecycle != nil {
structsTask.Lifecycle = &structs.TaskLifecycleConfig{
Hook: apiTask.Lifecycle.Hook,
Sidecar: apiTask.Lifecycle.Sidecar,
}
}
}
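
// ApiCSIPluginConfigToStructsCSIPluginConfig converts an API CSI plugin
// config to its structs form, returning nil for nil input.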
func ApiCSIPluginConfigToStructsCSIPluginConfig(apiConfig *api.TaskCSIPluginConfig) *structs.TaskCSIPluginConfig {
if apiConfig == nil {
return nil
}
sc := &structs.TaskCSIPluginConfig{}
sc.ID = apiConfig.ID
sc.Type = structs.CSIPluginType(apiConfig.Type)
sc.MountDir = apiConfig.MountDir
return sc
}
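
// ApiResourcesToStructs converts api.Resources to structs.Resources,
// including any networks and requested devices.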
func ApiResourcesToStructs(in *api.Resources) *structs.Resources {
if in == nil {
return nil
}
out := &structs.Resources{
CPU: *in.CPU,
MemoryMB: *in.MemoryMB,
}
// COMPAT(0.10): Only being used to issue warnings
if in.IOPS != nil {
out.IOPS = *in.IOPS
}
if len(in.Networks) != 0 {
out.Networks = ApiNetworkResourceToStructs(in.Networks)
}
if l := len(in.Devices); l != 0 {
out.Devices = make([]*structs.RequestedDevice, l)
for i, d := range in.Devices {
out.Devices[i] = &structs.RequestedDevice{
Name: d.Name,
Count: *d.Count,
Constraints: ApiConstraintsToStructs(d.Constraints),
Affinities: ApiAffinitiesToStructs(d.Affinities),
}
}
}
return out
}
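
// ApiNetworkResourceToStructs converts API network resources, along with
// their dynamic and reserved ports, to their structs form.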
func ApiNetworkResourceToStructs(in []*api.NetworkResource) []*structs.NetworkResource {
var out []*structs.NetworkResource
if len(in) == 0 {
return out
}
out = make([]*structs.NetworkResource, len(in))
for i, nw := range in {
out[i] = &structs.NetworkResource{
Mode: nw.Mode,
CIDR: nw.CIDR,
IP: nw.IP,
MBits: *nw.MBits,
}
if l := len(nw.DynamicPorts); l != 0 {
out[i].DynamicPorts = make([]structs.Port, l)
for j, dp := range nw.DynamicPorts {
out[i].DynamicPorts[j] = structs.Port{
Label: dp.Label,
Value: dp.Value,
To: dp.To,
}
}
}
if l := len(nw.ReservedPorts); l != 0 {
out[i].ReservedPorts = make([]structs.Port, l)
for j, rp := range nw.ReservedPorts {
out[i].ReservedPorts[j] = structs.Port{
Label: rp.Label,
Value: rp.Value,
To: rp.To,
}
}
}
}
return out
}
// TODO(schmichael): refactor and reuse in the service parsing above.
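// ApiServicesToStructs converts API service definitions, including their
// checks and Consul Connect configuration, to their structs form.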
func ApiServicesToStructs(in []*api.Service) []*structs.Service {
if len(in) == 0 {
return nil
}
out := make([]*structs.Service, len(in))
for i, s := range in {
out[i] = &structs.Service{
Name: s.Name,
PortLabel: s.PortLabel,
Tags: s.Tags,
CanaryTags: s.CanaryTags,
EnableTagOverride: s.EnableTagOverride,
AddressMode: s.AddressMode,
Meta: helper.CopyMapStringString(s.Meta),
CanaryMeta: helper.CopyMapStringString(s.CanaryMeta),
}
if l := len(s.Checks); l != 0 {
out[i].Checks = make([]*structs.ServiceCheck, l)
for j, check := range s.Checks {
out[i].Checks[j] = &structs.ServiceCheck{
Name: check.Name,
Type: check.Type,
Command: check.Command,
Args: check.Args,
Path: check.Path,
Protocol: check.Protocol,
PortLabel: check.PortLabel,
AddressMode: check.AddressMode,
Interval: check.Interval,
Timeout: check.Timeout,
InitialStatus: check.InitialStatus,
TLSSkipVerify: check.TLSSkipVerify,
Header: check.Header,
Method: check.Method,
GRPCService: check.GRPCService,
GRPCUseTLS: check.GRPCUseTLS,
TaskName: check.TaskName,
}
if check.CheckRestart != nil {
out[i].Checks[j].CheckRestart = &structs.CheckRestart{
Limit: check.CheckRestart.Limit,
Grace: *check.CheckRestart.Grace,
IgnoreWarnings: check.CheckRestart.IgnoreWarnings,
}
}
}
}
if s.Connect != nil {
out[i].Connect = ApiConsulConnectToStructs(s.Connect)
}
}
return out
}
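
// ApiConsulConnectToStructs converts a Consul Connect block, including any
// sidecar service and sidecar task, to its structs form.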
func ApiConsulConnectToStructs(in *api.ConsulConnect) *structs.ConsulConnect {
if in == nil {
return nil
}
out := &structs.ConsulConnect{
Native: in.Native,
}
if in.SidecarService != nil {
out.SidecarService = &structs.ConsulSidecarService{
Tags: helper.CopySliceString(in.SidecarService.Tags),
Port: in.SidecarService.Port,
}
if in.SidecarService.Proxy != nil {
out.SidecarService.Proxy = &structs.ConsulProxy{
LocalServiceAddress: in.SidecarService.Proxy.LocalServiceAddress,
LocalServicePort: in.SidecarService.Proxy.LocalServicePort,
Config: in.SidecarService.Proxy.Config,
}
upstreams := make([]structs.ConsulUpstream, len(in.SidecarService.Proxy.Upstreams))
for i, p := range in.SidecarService.Proxy.Upstreams {
upstreams[i] = structs.ConsulUpstream{
DestinationName: p.DestinationName,
LocalBindPort: p.LocalBindPort,
}
}
out.SidecarService.Proxy.Upstreams = upstreams
}
}
if in.SidecarTask != nil {
out.SidecarTask = &structs.SidecarTask{
Name: in.SidecarTask.Name,
Driver: in.SidecarTask.Driver,
Config: in.SidecarTask.Config,
User: in.SidecarTask.User,
Env: in.SidecarTask.Env,
Resources: ApiResourcesToStructs(in.SidecarTask.Resources),
Meta: in.SidecarTask.Meta,
LogConfig: &structs.LogConfig{},
ShutdownDelay: in.SidecarTask.ShutdownDelay,
KillSignal: in.SidecarTask.KillSignal,
}
if in.SidecarTask.KillTimeout != nil {
out.SidecarTask.KillTimeout = in.SidecarTask.KillTimeout
}
if in.SidecarTask.LogConfig != nil {
out.SidecarTask.LogConfig = &structs.LogConfig{}
if in.SidecarTask.LogConfig.MaxFiles != nil {
out.SidecarTask.LogConfig.MaxFiles = *in.SidecarTask.LogConfig.MaxFiles
}
if in.SidecarTask.LogConfig.MaxFileSizeMB != nil {
out.SidecarTask.LogConfig.MaxFileSizeMB = *in.SidecarTask.LogConfig.MaxFileSizeMB
}
}
}
return out
}
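
// ApiConstraintsToStructs converts a slice of API constraints to their
// structs form, returning nil for nil input.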
func ApiConstraintsToStructs(in []*api.Constraint) []*structs.Constraint {
if in == nil {
return nil
}
out := make([]*structs.Constraint, len(in))
for i, ac := range in {
out[i] = ApiConstraintToStructs(ac)
}
return out
}
func ApiConstraintToStructs(in *api.Constraint) *structs.Constraint {
if in == nil {
return nil
}
return &structs.Constraint{
LTarget: in.LTarget,
RTarget: in.RTarget,
Operand: in.Operand,
}
}
func ApiAffinitiesToStructs(in []*api.Affinity) []*structs.Affinity {
if in == nil {
return nil
}
out := make([]*structs.Affinity, len(in))
for i, ac := range in {
out[i] = ApiAffinityToStructs(ac)
}
return out
}
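
// ApiAffinityToStructs converts a single API affinity to its structs form.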
func ApiAffinityToStructs(a1 *api.Affinity) *structs.Affinity {
return &structs.Affinity{
LTarget: a1.LTarget,
Operand: a1.Operand,
RTarget: a1.RTarget,
Weight: *a1.Weight,
}
}
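
// ApiSpreadToStructs converts an API spread block and its targets to the
// structs form.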
func ApiSpreadToStructs(a1 *api.Spread) *structs.Spread {
ret := &structs.Spread{}
ret.Attribute = a1.Attribute
ret.Weight = *a1.Weight
if a1.SpreadTarget != nil {
ret.SpreadTarget = make([]*structs.SpreadTarget, len(a1.SpreadTarget))
for i, st := range a1.SpreadTarget {
ret.SpreadTarget[i] = &structs.SpreadTarget{
Value: st.Value,
Percent: st.Percent,
}
}
}
return ret
}