package agent

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"github.com/golang/snappy"
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/nomad/structs"
)
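
// JobsRequest routes requests for the jobs collection: GET lists the jobs in
// the region, while PUT or POST registers (or updates) a job.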
func (s *HTTPServer) JobsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	switch req.Method {
	case "GET":
		return s.jobListRequest(resp, req)
	case "PUT", "POST":
		return s.jobUpdate(resp, req, "")
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
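
// jobListRequest lists the jobs known to the region via the Job.List RPC,
// normalizing a nil result to an empty slice.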
func (s *HTTPServer) jobListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	args := structs.JobListRequest{}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobListResponse
	if err := s.agent.RPC("Job.List", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Jobs == nil {
		out.Jobs = make([]*structs.JobListStub, 0)
	}
	return out.Jobs, nil
}
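
// JobSpecificRequest dispatches requests under /v1/job/<name> to the handler
// matching the path suffix (for example, /v1/job/example/evaluate is routed to
// jobForceEvaluate); paths without a known suffix fall through to jobCRUD.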
func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	path := strings.TrimPrefix(req.URL.Path, "/v1/job/")
	switch {
	case strings.HasSuffix(path, "/evaluate"):
		jobName := strings.TrimSuffix(path, "/evaluate")
		return s.jobForceEvaluate(resp, req, jobName)
	case strings.HasSuffix(path, "/allocations"):
		jobName := strings.TrimSuffix(path, "/allocations")
		return s.jobAllocations(resp, req, jobName)
	case strings.HasSuffix(path, "/evaluations"):
		jobName := strings.TrimSuffix(path, "/evaluations")
		return s.jobEvaluations(resp, req, jobName)
	case strings.HasSuffix(path, "/periodic/force"):
		jobName := strings.TrimSuffix(path, "/periodic/force")
		return s.periodicForceRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/plan"):
		jobName := strings.TrimSuffix(path, "/plan")
		return s.jobPlan(resp, req, jobName)
	case strings.HasSuffix(path, "/summary"):
		jobName := strings.TrimSuffix(path, "/summary")
		return s.jobSummaryRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/dispatch"):
		jobName := strings.TrimSuffix(path, "/dispatch")
		return s.jobDispatchRequest(resp, req, jobName)
	case strings.HasSuffix(path, "/versions"):
		jobName := strings.TrimSuffix(path, "/versions")
		return s.jobVersions(resp, req, jobName)
	default:
		return s.jobCRUD(resp, req, path)
	}
}
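
// jobForceEvaluate creates a new evaluation for the given job via the
// Job.Evaluate RPC. Only PUT and POST are allowed.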
func (s *HTTPServer) jobForceEvaluate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobEvaluateRequest{
		JobID: jobName,
	}
	s.parseRegion(req, &args.Region)

	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Evaluate", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
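
// jobPlan performs a dry-run of a job registration: the submitted api.Job is
// converted to its structs form and passed to the Job.Plan RPC, optionally
// with a diff. Only PUT and POST are allowed.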
func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var args api.JobPlanRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}
	if args.Job.ID == nil {
		return nil, CodedError(400, "Job must have a valid ID")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match")
	}
	s.parseRegion(req, &args.Region)

	sJob := ApiJobToStructJob(args.Job)
	planReq := structs.JobPlanRequest{
		Job:  sJob,
		Diff: args.Diff,
		WriteRequest: structs.WriteRequest{
			Region: args.WriteRequest.Region,
		},
	}
	var out structs.JobPlanResponse
	if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
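
// ValidateJobRequest validates a submitted job definition via the
// Job.Validate RPC without registering it. Only PUT and POST are allowed.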
func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
	// Ensure request method is POST or PUT
	if !(req.Method == "POST" || req.Method == "PUT") {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	var validateRequest api.JobValidateRequest
	if err := decodeBody(req, &validateRequest); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if validateRequest.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}

	job := ApiJobToStructJob(validateRequest.Job)
	args := structs.JobValidateRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region: validateRequest.Region,
		},
	}
	s.parseRegion(req, &args.Region)

	var out structs.JobValidateResponse
	if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
		return nil, err
	}

	return out, nil
}
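
// periodicForceRequest forces a new launch of a periodic job via the
// Periodic.Force RPC. Only PUT and POST are allowed.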
func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}

	args := structs.PeriodicForceRequest{
		JobID: jobName,
	}
	s.parseRegion(req, &args.Region)

	var out structs.PeriodicForceResponse
	if err := s.agent.RPC("Periodic.Force", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
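
// jobAllocations lists the allocations for a job via the Job.Allocations RPC.
// The optional "all" query parameter is forwarded as AllAllocs on the request.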
func (s *HTTPServer) jobAllocations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	allAllocs, _ := strconv.ParseBool(req.URL.Query().Get("all"))

	args := structs.JobSpecificRequest{
		JobID:     jobName,
		AllAllocs: allAllocs,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobAllocationsResponse
	if err := s.agent.RPC("Job.Allocations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Allocations == nil {
		out.Allocations = make([]*structs.AllocListStub, 0)
	}
	return out.Allocations, nil
}
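
// jobEvaluations lists the evaluations for a job via the Job.Evaluations RPC,
// normalizing a nil result to an empty slice.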
func (s *HTTPServer) jobEvaluations(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	if req.Method != "GET" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobEvaluationsResponse
	if err := s.agent.RPC("Job.Evaluations", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Evaluations == nil {
		out.Evaluations = make([]*structs.Evaluation, 0)
	}
	return out.Evaluations, nil
}
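
// jobCRUD dispatches single-job requests by HTTP method: GET queries the job,
// PUT and POST update it, and DELETE deregisters it.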
func (s *HTTPServer) jobCRUD(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	switch req.Method {
	case "GET":
		return s.jobQuery(resp, req, jobName)
	case "PUT", "POST":
		return s.jobUpdate(resp, req, jobName)
	case "DELETE":
		return s.jobDelete(resp, req, jobName)
	default:
		return nil, CodedError(405, ErrInvalidMethod)
	}
}
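
// jobQuery fetches a single job via the Job.GetJob RPC and, if the job
// carries a snappy-compressed payload, returns a copy with the payload
// decoded.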
func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.SingleJobResponse
	if err := s.agent.RPC("Job.GetJob", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.Job == nil {
		return nil, CodedError(404, "job not found")
	}

	// Decode the payload if there is any
	job := out.Job
	if len(job.Payload) != 0 {
		decoded, err := snappy.Decode(nil, out.Job.Payload)
		if err != nil {
			return nil, err
		}
		job = job.Copy()
		job.Payload = decoded
	}

	return job, nil
}
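
// jobUpdate registers or updates a job. The body is decoded as an
// api.JobRegisterRequest, converted to its structs form, and submitted via
// the Job.Register RPC; when jobName is non-empty it must match the job's ID.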
func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	var args api.JobRegisterRequest
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.Job == nil {
		return nil, CodedError(400, "Job must be specified")
	}

	if args.Job.ID == nil {
		return nil, CodedError(400, "Job ID hasn't been provided")
	}
	if jobName != "" && *args.Job.ID != jobName {
		return nil, CodedError(400, "Job ID does not match name")
	}
	s.parseRegion(req, &args.Region)

	sJob := ApiJobToStructJob(args.Job)

	regReq := structs.JobRegisterRequest{
		Job:            sJob,
		EnforceIndex:   args.EnforceIndex,
		JobModifyIndex: args.JobModifyIndex,
		WriteRequest: structs.WriteRequest{
			Region: args.WriteRequest.Region,
		},
	}
	var out structs.JobRegisterResponse
	if err := s.agent.RPC("Job.Register", &regReq, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
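
// jobDelete deregisters a job via the Job.Deregister RPC, forwarding the
// optional "purge" query parameter as Purge on the request.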
func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {

	purgeStr := req.URL.Query().Get("purge")
	var purgeBool bool
	if purgeStr != "" {
		var err error
		purgeBool, err = strconv.ParseBool(purgeStr)
		if err != nil {
			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "purge", purgeStr, err)
		}
	}

	args := structs.JobDeregisterRequest{
		JobID: jobName,
		Purge: purgeBool,
	}
	s.parseRegion(req, &args.Region)

	var out structs.JobDeregisterResponse
	if err := s.agent.RPC("Job.Deregister", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
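
// jobVersions lists the tracked versions of a job via the Job.GetJobVersions
// RPC, returning a 404 when none are found.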
func (s *HTTPServer) jobVersions(resp http.ResponseWriter, req *http.Request,
	jobName string) (interface{}, error) {
	args := structs.JobSpecificRequest{
		JobID: jobName,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobVersionsResponse
	if err := s.agent.RPC("Job.GetJobVersions", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if len(out.Versions) == 0 {
		return nil, CodedError(404, "job versions not found")
	}

	return out.Versions, nil
}
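
// jobSummaryRequest returns the summary for a job via the Job.Summary RPC,
// returning a 404 when the job does not exist.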
func (s *HTTPServer) jobSummaryRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
	args := structs.JobSummaryRequest{
		JobID: name,
	}
	if s.parse(resp, req, &args.Region, &args.QueryOptions) {
		return nil, nil
	}

	var out structs.JobSummaryResponse
	if err := s.agent.RPC("Job.Summary", &args, &out); err != nil {
		return nil, err
	}

	setMeta(resp, &out.QueryMeta)
	if out.JobSummary == nil {
		return nil, CodedError(404, "job not found")
	}
	setIndex(resp, out.Index)
	return out.JobSummary, nil
}
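
// jobDispatchRequest dispatches a new instance of a parameterized job via the
// Job.Dispatch RPC. A job ID in the body, if set, must match the ID from the
// URL. Only PUT and POST are allowed.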
func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
	if req.Method != "PUT" && req.Method != "POST" {
		return nil, CodedError(405, ErrInvalidMethod)
	}
	args := structs.JobDispatchRequest{}
	if err := decodeBody(req, &args); err != nil {
		return nil, CodedError(400, err.Error())
	}
	if args.JobID != "" && args.JobID != name {
		return nil, CodedError(400, "Job ID does not match")
	}
	if args.JobID == "" {
		args.JobID = name
	}

	s.parseRegion(req, &args.Region)

	var out structs.JobDispatchResponse
	if err := s.agent.RPC("Job.Dispatch", &args, &out); err != nil {
		return nil, err
	}
	setIndex(resp, out.Index)
	return out, nil
}
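
// ApiJobToStructJob canonicalizes an api.Job and converts it into the
// equivalent structs.Job used by the RPC layer, including constraints,
// update and periodic configuration, parameterized job settings, and task
// groups.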
func ApiJobToStructJob(job *api.Job) *structs.Job {
	job.Canonicalize()

	j := &structs.Job{
		Stop:        *job.Stop,
		Region:      *job.Region,
		ID:          *job.ID,
		ParentID:    *job.ParentID,
		Name:        *job.Name,
		Type:        *job.Type,
		Priority:    *job.Priority,
		AllAtOnce:   *job.AllAtOnce,
		Datacenters: job.Datacenters,
		Payload:     job.Payload,
		Meta:        job.Meta,
		VaultToken:  *job.VaultToken,
	}

	if l := len(job.Constraints); l != 0 {
		j.Constraints = make([]*structs.Constraint, l)
		for i, c := range job.Constraints {
			con := &structs.Constraint{}
			ApiConstraintToStructs(c, con)
			j.Constraints[i] = con
		}
	}

	if job.Update != nil {
		j.Update = structs.UpdateStrategy{
			Stagger:     job.Update.Stagger,
			MaxParallel: job.Update.MaxParallel,
		}
	}

	if job.Periodic != nil {
		j.Periodic = &structs.PeriodicConfig{
			Enabled:         *job.Periodic.Enabled,
			SpecType:        *job.Periodic.SpecType,
			ProhibitOverlap: *job.Periodic.ProhibitOverlap,
			TimeZone:        *job.Periodic.TimeZone,
		}

		if job.Periodic.Spec != nil {
			j.Periodic.Spec = *job.Periodic.Spec
		}
	}

	if job.ParameterizedJob != nil {
		j.ParameterizedJob = &structs.ParameterizedJobConfig{
			Payload:      job.ParameterizedJob.Payload,
			MetaRequired: job.ParameterizedJob.MetaRequired,
			MetaOptional: job.ParameterizedJob.MetaOptional,
		}
	}

	if l := len(job.TaskGroups); l != 0 {
		j.TaskGroups = make([]*structs.TaskGroup, l)
		for i, taskGroup := range job.TaskGroups {
			tg := &structs.TaskGroup{}
			ApiTgToStructsTG(taskGroup, tg)
			j.TaskGroups[i] = tg
		}
	}

	return j
}
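
// ApiTgToStructsTG converts an api.TaskGroup into the given structs.TaskGroup,
// copying constraints, restart policy, ephemeral disk settings, and tasks.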
func ApiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
	tg.Name = *taskGroup.Name
	tg.Count = *taskGroup.Count
	tg.Meta = taskGroup.Meta

	if l := len(taskGroup.Constraints); l != 0 {
		tg.Constraints = make([]*structs.Constraint, l)
		for k, constraint := range taskGroup.Constraints {
			c := &structs.Constraint{}
			ApiConstraintToStructs(constraint, c)
			tg.Constraints[k] = c
		}
	}

	tg.RestartPolicy = &structs.RestartPolicy{
		Attempts: *taskGroup.RestartPolicy.Attempts,
		Interval: *taskGroup.RestartPolicy.Interval,
		Delay:    *taskGroup.RestartPolicy.Delay,
		Mode:     *taskGroup.RestartPolicy.Mode,
	}

	tg.EphemeralDisk = &structs.EphemeralDisk{
		Sticky:  *taskGroup.EphemeralDisk.Sticky,
		SizeMB:  *taskGroup.EphemeralDisk.SizeMB,
		Migrate: *taskGroup.EphemeralDisk.Migrate,
	}

	if l := len(taskGroup.Tasks); l != 0 {
		tg.Tasks = make([]*structs.Task, l)
		for l, task := range taskGroup.Tasks {
			t := &structs.Task{}
			ApiTaskToStructsTask(task, t)
			tg.Tasks[l] = t
		}
	}
}
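
// ApiTaskToStructsTask converts an api.Task into the given structs.Task,
// copying scalar fields and rebuilding constraints, services and checks,
// resources and networks, log config, artifacts, Vault, templates, and
// dispatch payload configuration.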
func ApiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
	structsTask.Name = apiTask.Name
	structsTask.Driver = apiTask.Driver
	structsTask.User = apiTask.User
	structsTask.Leader = apiTask.Leader
	structsTask.Config = apiTask.Config
	structsTask.Env = apiTask.Env
	structsTask.Meta = apiTask.Meta
	structsTask.KillTimeout = *apiTask.KillTimeout

	if l := len(apiTask.Constraints); l != 0 {
		structsTask.Constraints = make([]*structs.Constraint, l)
		for i, constraint := range apiTask.Constraints {
			c := &structs.Constraint{}
			ApiConstraintToStructs(constraint, c)
			structsTask.Constraints[i] = c
		}
	}

	if l := len(apiTask.Services); l != 0 {
		structsTask.Services = make([]*structs.Service, l)
		for i, service := range apiTask.Services {
			structsTask.Services[i] = &structs.Service{
				Name:      service.Name,
				PortLabel: service.PortLabel,
				Tags:      service.Tags,
			}

			if l := len(service.Checks); l != 0 {
				structsTask.Services[i].Checks = make([]*structs.ServiceCheck, l)
				for j, check := range service.Checks {
					structsTask.Services[i].Checks[j] = &structs.ServiceCheck{
						Name:          check.Name,
						Type:          check.Type,
						Command:       check.Command,
						Args:          check.Args,
						Path:          check.Path,
						Protocol:      check.Protocol,
						PortLabel:     check.PortLabel,
						Interval:      check.Interval,
						Timeout:       check.Timeout,
						InitialStatus: check.InitialStatus,
					}
				}
			}
		}
	}

	structsTask.Resources = &structs.Resources{
		CPU:      *apiTask.Resources.CPU,
		MemoryMB: *apiTask.Resources.MemoryMB,
		IOPS:     *apiTask.Resources.IOPS,
	}

	if l := len(apiTask.Resources.Networks); l != 0 {
		structsTask.Resources.Networks = make([]*structs.NetworkResource, l)
		for i, nw := range apiTask.Resources.Networks {
			structsTask.Resources.Networks[i] = &structs.NetworkResource{
				CIDR:  nw.CIDR,
				IP:    nw.IP,
				MBits: *nw.MBits,
			}

			if l := len(nw.DynamicPorts); l != 0 {
				structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, l)
				for j, dp := range nw.DynamicPorts {
					structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{
						Label: dp.Label,
						Value: dp.Value,
					}
				}
			}

			if l := len(nw.ReservedPorts); l != 0 {
				structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, l)
				for j, rp := range nw.ReservedPorts {
					structsTask.Resources.Networks[i].ReservedPorts[j] = structs.Port{
						Label: rp.Label,
						Value: rp.Value,
					}
				}
			}
		}
	}

	structsTask.LogConfig = &structs.LogConfig{
		MaxFiles:      *apiTask.LogConfig.MaxFiles,
		MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
	}

	if l := len(apiTask.Artifacts); l != 0 {
		structsTask.Artifacts = make([]*structs.TaskArtifact, l)
		for k, ta := range apiTask.Artifacts {
			structsTask.Artifacts[k] = &structs.TaskArtifact{
				GetterSource:  *ta.GetterSource,
				GetterOptions: ta.GetterOptions,
				RelativeDest:  *ta.RelativeDest,
			}
		}
	}

	if apiTask.Vault != nil {
		structsTask.Vault = &structs.Vault{
			Policies:     apiTask.Vault.Policies,
			Env:          *apiTask.Vault.Env,
			ChangeMode:   *apiTask.Vault.ChangeMode,
			ChangeSignal: *apiTask.Vault.ChangeSignal,
		}
	}

	if l := len(apiTask.Templates); l != 0 {
		structsTask.Templates = make([]*structs.Template, l)
		for i, template := range apiTask.Templates {
			structsTask.Templates[i] = &structs.Template{
				SourcePath:   *template.SourcePath,
				DestPath:     *template.DestPath,
				EmbeddedTmpl: *template.EmbeddedTmpl,
				ChangeMode:   *template.ChangeMode,
				ChangeSignal: *template.ChangeSignal,
				Splay:        *template.Splay,
				Perms:        *template.Perms,
				LeftDelim:    *template.LeftDelim,
				RightDelim:   *template.RightDelim,
			}
		}
	}

	if apiTask.DispatchPayload != nil {
		structsTask.DispatchPayload = &structs.DispatchPayloadConfig{
			File: apiTask.DispatchPayload.File,
		}
	}
}
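
// ApiConstraintToStructs copies an api.Constraint into a structs.Constraint.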
func ApiConstraintToStructs(c1 *api.Constraint, c2 *structs.Constraint) {
	c2.LTarget = c1.LTarget
	c2.RTarget = c1.RTarget
	c2.Operand = c1.Operand
}