Making the job spec return api.Job

Diptanu Choudhury 2017-02-06 11:48:28 -08:00 committed by Alex Dadgar
parent c62cd5cc55
commit 7567209857
24 changed files with 879 additions and 267 deletions

View File

@ -4,6 +4,8 @@ import (
"reflect"
"sort"
"testing"
"github.com/hashicorp/nomad/helper"
)
func TestAllocations_List(t *testing.T) {
@ -28,9 +30,9 @@ func TestAllocations_List(t *testing.T) {
return
job := &Job{
ID: "job1",
Name: "Job #1",
Type: JobTypeService,
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("Job #1"),
Type: helper.StringToPtr(JobTypeService),
}
eval, _, err := c.Jobs().Register(job, nil)
if err != nil {
@ -74,10 +76,11 @@ func TestAllocations_PrefixList(t *testing.T) {
return
job := &Job{
ID: "job1",
Name: "Job #1",
Type: JobTypeService,
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("Job #1"),
Type: helper.StringToPtr(JobTypeService),
}
eval, _, err := c.Jobs().Register(job, nil)
if err != nil {
t.Fatalf("err: %s", err)

View File

@ -3,6 +3,8 @@ package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/helper"
)
func TestCompose(t *testing.T) {
@ -12,10 +14,10 @@ func TestCompose(t *testing.T) {
SetMeta("foo", "bar").
Constrain(NewConstraint("kernel.name", "=", "linux")).
Require(&Resources{
CPU: 1250,
MemoryMB: 1024,
DiskMB: 2048,
IOPS: 500,
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(1024),
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
CIDR: "0.0.0.0/0",
@ -40,11 +42,11 @@ func TestCompose(t *testing.T) {
// Check that the composed result looks correct
expect := &Job{
Region: "region1",
ID: "job1",
Name: "myjob",
Type: JobTypeService,
Priority: 2,
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeService),
Priority: helper.IntToPtr(2),
Datacenters: []string{
"dc1",
},
@ -60,8 +62,8 @@ func TestCompose(t *testing.T) {
},
TaskGroups: []*TaskGroup{
&TaskGroup{
Name: "grp1",
Count: 2,
Name: helper.StringToPtr("grp1"),
Count: helper.IntToPtr(2),
Constraints: []*Constraint{
&Constraint{
LTarget: "kernel.name",
@ -74,10 +76,10 @@ func TestCompose(t *testing.T) {
Name: "task1",
Driver: "exec",
Resources: &Resources{
CPU: 1250,
MemoryMB: 1024,
DiskMB: 2048,
IOPS: 500,
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(1024),
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
CIDR: "0.0.0.0/0",

View File

@ -6,6 +6,9 @@ import (
"sort"
"strconv"
"time"
"github.com/gorhill/cronexpr"
"github.com/hashicorp/nomad/helper"
)
const (
@ -14,6 +17,9 @@ const (
// JobTypeBatch indicates a short-lived process
JobTypeBatch = "batch"
// PeriodicSpecCron is used for a cron spec.
PeriodicSpecCron = "cron"
)
const (
@ -32,6 +38,16 @@ func (c *Client) Jobs() *Jobs {
return &Jobs{client: c}
}
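// Validate is used to validate a job, returning any validation errors in the response.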
func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) {
var resp JobValidateResponse
req := &JobValidateRequest{Job: job}
if q != nil {
req.WriteRequest = WriteRequest{Region: q.Region}
}
wm, err := j.client.write("/v1/validate/job", req, &resp, q)
return &resp, wm, err
}
// Register is used to register a new job. It returns the ID
// of the evaluation, along with any errors encountered.
func (j *Jobs) Register(job *Job, q *WriteOptions) (string, *WriteMeta, error) {
@ -162,7 +178,7 @@ func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *Wr
Job: job,
Diff: diff,
}
wm, err := j.client.write("/v1/job/"+job.ID+"/plan", req, &resp, q)
wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q)
if err != nil {
return nil, nil, err
}
@ -207,10 +223,36 @@ type UpdateStrategy struct {
// PeriodicConfig is for serializing periodic config for a job.
type PeriodicConfig struct {
Enabled bool
Spec string
SpecType string
ProhibitOverlap bool
Enabled *bool
Spec *string
SpecType *string
ProhibitOverlap *bool
}
func (p *PeriodicConfig) Canonicalize() {
if p.Enabled == nil {
p.Enabled = helper.BoolToPtr(true)
}
if p.SpecType == nil {
p.SpecType = helper.StringToPtr(PeriodicSpecCron)
}
if p.ProhibitOverlap == nil {
p.ProhibitOverlap = helper.BoolToPtr(false)
}
}
// Next returns the closest time instant matching the spec that is after the
// passed time. If no matching instance exists, the zero value of time.Time is
// returned. The `time.Location` of the returned value matches that of the
// passed time.
func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
if *p.SpecType == PeriodicSpecCron {
if e, err := cronexpr.Parse(*p.Spec); err == nil {
return e.Next(fromTime)
}
}
return time.Time{}
}
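For illustration, a minimal self-contained sketch of the Canonicalize/Next flow above, written from a caller's perspective (the cron spec is an arbitrary example):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	p := &api.PeriodicConfig{
		Enabled: helper.BoolToPtr(true),
		Spec:    helper.StringToPtr("*/15 * * * *"), // arbitrary example spec
	}
	p.Canonicalize() // defaults SpecType to "cron" and ProhibitOverlap to false
	fmt.Println(p.Next(time.Now())) // zero time.Time if nothing ever matches
}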
// ParameterizedJobConfig is used to configure the parameterized job.
@ -222,13 +264,13 @@ type ParameterizedJobConfig struct {
// Job is used to serialize a job.
type Job struct {
Region string
ID string
ParentID string
Name string
Type string
Priority int
AllAtOnce bool
Region *string
ID *string
ParentID *string
Name *string
Type *string
Priority *int
AllAtOnce *bool
Datacenters []string
Constraints []*Constraint
TaskGroups []*TaskGroup
@ -237,12 +279,69 @@ type Job struct {
ParameterizedJob *ParameterizedJobConfig
Payload []byte
Meta map[string]string
VaultToken string
Status string
StatusDescription string
CreateIndex uint64
ModifyIndex uint64
JobModifyIndex uint64
VaultToken *string
Status *string
StatusDescription *string
CreateIndex *uint64
ModifyIndex *uint64
JobModifyIndex *uint64
}
// IsPeriodic returns whether a job is periodic.
func (j *Job) IsPeriodic() bool {
return j.Periodic != nil
}
// IsParameterized returns whether a job is a parameterized job.
func (j *Job) IsParameterized() bool {
return j.ParameterizedJob != nil
}
func (j *Job) Canonicalize() {
if j.ID == nil {
j.ID = helper.StringToPtr("")
}
if j.ParentID == nil {
j.ParentID = helper.StringToPtr("")
}
if j.Name == nil {
j.Name = j.ID
}
if j.Priority == nil {
j.Priority = helper.IntToPtr(50)
}
if j.Region == nil {
j.Region = helper.StringToPtr("global")
}
if j.Type == nil {
j.Type = helper.StringToPtr("service")
}
if j.AllAtOnce == nil {
j.AllAtOnce = helper.BoolToPtr(false)
}
if j.VaultToken == nil {
j.VaultToken = helper.StringToPtr("")
}
if j.Status == nil {
j.Status = helper.StringToPtr("")
}
if j.StatusDescription == nil {
j.StatusDescription = helper.StringToPtr("")
}
if j.CreateIndex == nil {
j.CreateIndex = helper.Uint64ToPtr(0)
}
if j.ModifyIndex == nil {
j.ModifyIndex = helper.Uint64ToPtr(0)
}
if j.JobModifyIndex == nil {
j.JobModifyIndex = helper.Uint64ToPtr(0)
}
if j.Periodic != nil {
j.Periodic.Canonicalize()
}
for _, tg := range j.TaskGroups {
tg.Canonicalize(*j.Type)
}
}
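Since every scalar field is now a pointer, nil distinguishes "unset" from a zero value, and Canonicalize back-fills the defaults. A minimal sketch of the effect, as seen by a caller:

job := &api.Job{ID: helper.StringToPtr("example")}
job.Canonicalize()
// Now *job.Name == "example", *job.Region == "global",
// *job.Type == "service", and *job.Priority == 50.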
// JobSummary summarizes the state of the allocations of a job
@ -330,11 +429,11 @@ func NewBatchJob(id, name, region string, pri int) *Job {
// newJob is used to create a new Job struct.
func newJob(id, name, region, typ string, pri int) *Job {
return &Job{
Region: region,
ID: id,
Name: name,
Type: typ,
Priority: pri,
Region: &region,
ID: &id,
Name: &name,
Type: &typ,
Priority: &pri,
}
}
@ -371,6 +470,27 @@ func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {
return j
}
type WriteRequest struct {
// The target region for this write
Region string
}
// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
Job *Job
WriteRequest
}
// JobValidateResponse is the response from a validate request
type JobValidateResponse struct {
// DriverConfigValidated indicates whether the agent validated the driver
// config
DriverConfigValidated bool
// ValidationErrors is a list of validation errors
ValidationErrors []string
}
// RegisterJobRequest is used to serialize a job registration
type RegisterJobRequest struct {
Job *Job

View File

@ -6,6 +6,7 @@ import (
"strings"
"testing"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/testutil"
)
@ -45,11 +46,38 @@ func TestJobs_Register(t *testing.T) {
assertQueryMeta(t, qm)
// Check that we got the expected response
if len(resp) != 1 || resp[0].ID != job.ID {
if len(resp) != 1 || resp[0].ID != *job.ID {
t.Fatalf("bad: %#v", resp[0])
}
}
func TestJobs_Validate(t *testing.T) {
c, s := makeClient(t, nil, nil)
defer s.Stop()
jobs := c.Jobs()
// Create a job and attempt to register it
job := testJob()
resp, _, err := jobs.Validate(job, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
if len(resp.ValidationErrors) != 0 {
t.Fatalf("bad %v", resp)
}
job.ID = nil
resp1, _, err := jobs.Validate(job, nil)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(resp1.ValidationErrors) == 0 {
t.Fatalf("bad %v", resp1)
}
}
func TestJobs_EnforceRegister(t *testing.T) {
c, s := makeClient(t, nil, nil)
defer s.Stop()
@ -96,7 +124,7 @@ func TestJobs_EnforceRegister(t *testing.T) {
t.Fatalf("bad length: %d", len(resp))
}
if resp[0].ID != job.ID {
if resp[0].ID != *job.ID {
t.Fatalf("bad: %#v", resp[0])
}
curIndex := resp[0].JobModifyIndex
@ -178,13 +206,13 @@ func TestJobs_PrefixList(t *testing.T) {
// Query the job again and ensure it exists
// Listing when nothing exists returns empty
results, qm, err = jobs.PrefixList(job.ID[:1])
results, qm, err = jobs.PrefixList((*job.ID)[:1])
if err != nil {
t.Fatalf("err: %s", err)
}
// Check if we have the right list
if len(results) != 1 || results[0].ID != job.ID {
if len(results) != 1 || results[0].ID != *job.ID {
t.Fatalf("bad: %#v", results)
}
}
@ -222,7 +250,7 @@ func TestJobs_List(t *testing.T) {
}
// Check if we have the right list
if len(results) != 1 || results[0].ID != job.ID {
if len(results) != 1 || results[0].ID != *job.ID {
t.Fatalf("bad: %#v", results)
}
}
@ -387,7 +415,7 @@ func TestJobs_PeriodicForce(t *testing.T) {
}
testutil.WaitForResult(func() (bool, error) {
out, _, err := jobs.Info(job.ID, nil)
out, _, err := jobs.Info(*job.ID, nil)
if err != nil || out == nil || *out.ID != *job.ID {
return false, err
}
@ -397,7 +425,7 @@ func TestJobs_PeriodicForce(t *testing.T) {
})
// Try force again
evalID, wm, err := jobs.PeriodicForce(job.ID, nil)
evalID, wm, err := jobs.PeriodicForce(*job.ID, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
@ -519,10 +547,10 @@ func TestJobs_JobSummary(t *testing.T) {
assertQueryMeta(t, qm)
// Check that the result is what we expect
if job.ID != result.JobID {
if *job.ID != result.JobID {
t.Fatalf("err: expected job id of %s saw %s", job.ID, result.JobID)
}
if _, ok := result.Summary[taskName]; !ok {
if _, ok := result.Summary[*taskName]; !ok {
t.Fatalf("err: unable to find %s key in job summary", taskName)
}
}
@ -530,11 +558,11 @@ func TestJobs_JobSummary(t *testing.T) {
func TestJobs_NewBatchJob(t *testing.T) {
job := NewBatchJob("job1", "myjob", "region1", 5)
expect := &Job{
Region: "region1",
ID: "job1",
Name: "myjob",
Type: JobTypeBatch,
Priority: 5,
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeBatch),
Priority: helper.IntToPtr(5),
}
if !reflect.DeepEqual(job, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, job)
@ -544,11 +572,11 @@ func TestJobs_NewBatchJob(t *testing.T) {
func TestJobs_NewServiceJob(t *testing.T) {
job := NewServiceJob("job1", "myjob", "region1", 5)
expect := &Job{
Region: "region1",
ID: "job1",
Name: "myjob",
Type: JobTypeService,
Priority: 5,
Region: helper.StringToPtr("region1"),
ID: helper.StringToPtr("job1"),
Name: helper.StringToPtr("myjob"),
Type: helper.StringToPtr(JobTypeService),
Priority: helper.IntToPtr(5),
}
if !reflect.DeepEqual(job, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, job)

View File

@ -1,15 +1,48 @@
package api
import "github.com/hashicorp/nomad/helper"
// Resources encapsulates the required resources of
// a given task or task group.
type Resources struct {
CPU int
MemoryMB int
DiskMB int
IOPS int
CPU *int
MemoryMB *int
DiskMB *int
IOPS *int
Networks []*NetworkResource
}
func MinResources() *Resources {
return &Resources{
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(10),
IOPS: helper.IntToPtr(0),
}
}
// Merge merges this resource with another resource.
func (r *Resources) Merge(other *Resources) {
if other == nil {
return
}
if other.CPU != nil {
r.CPU = other.CPU
}
if other.MemoryMB != nil {
r.MemoryMB = other.MemoryMB
}
if other.DiskMB != nil {
r.DiskMB = other.DiskMB
}
if other.IOPS != nil {
r.IOPS = other.IOPS
}
if len(other.Networks) != 0 {
r.Networks = other.Networks
}
}
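Merge only copies the fields the other Resources actually sets, which is how parseResources later in this diff layers user-specified values over the MinResources defaults. A quick sketch:

r := api.MinResources() // CPU 100, MemoryMB 10, IOPS 0
r.Merge(&api.Resources{MemoryMB: helper.IntToPtr(256)})
// r.CPU and r.IOPS are unchanged; *r.MemoryMB is now 256.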
type Port struct {
Label string
Value int

View File

@ -2,6 +2,8 @@ package api
import (
"time"
"github.com/hashicorp/nomad/helper"
)
// MemoryStats holds memory usage related stats
@ -84,15 +86,35 @@ type Service struct {
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
Sticky bool
Migrate bool
SizeMB int `mapstructure:"size"`
Sticky *bool
Migrate *bool
SizeMB *int `mapstructure:"size"`
}
func DefaultEphemeralDisk() *EphemeralDisk {
return &EphemeralDisk{
Sticky: helper.BoolToPtr(false),
Migrate: helper.BoolToPtr(false),
SizeMB: helper.IntToPtr(300),
}
}
func (e *EphemeralDisk) Canonicalize() {
if e.Sticky == nil {
e.Sticky = helper.BoolToPtr(false)
}
if e.Migrate == nil {
e.Migrate = helper.BoolToPtr(false)
}
if e.SizeMB == nil {
e.SizeMB = helper.IntToPtr(300)
}
}
// TaskGroup is the unit of scheduling.
type TaskGroup struct {
Name string
Count int
Name *string
Count *int
Constraints []*Constraint
Tasks []*Task
RestartPolicy *RestartPolicy
@ -103,8 +125,43 @@ type TaskGroup struct {
// NewTaskGroup creates a new TaskGroup.
func NewTaskGroup(name string, count int) *TaskGroup {
return &TaskGroup{
Name: name,
Count: count,
Name: helper.StringToPtr(name),
Count: helper.IntToPtr(count),
}
}
func (g *TaskGroup) Canonicalize(jobType string) {
if g.Name == nil {
g.Name = helper.StringToPtr("")
}
if g.Count == nil {
g.Count = helper.IntToPtr(1)
}
for _, t := range g.Tasks {
t.Canonicalize()
}
if g.EphemeralDisk == nil {
g.EphemeralDisk = DefaultEphemeralDisk()
} else {
g.EphemeralDisk.Canonicalize()
}
if g.RestartPolicy == nil {
switch jobType {
case "service", "system":
g.RestartPolicy = &RestartPolicy{
Delay: 15 * time.Second,
Attempts: 2,
Interval: 1 * time.Minute,
Mode: "delay",
}
default:
g.RestartPolicy = &RestartPolicy{
Delay: 15 * time.Second,
Attempts: 15,
Interval: 7 * 24 * time.Hour,
Mode: "delay",
}
}
}
}
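A sketch of how the jobType argument picks the restart defaults (the group is otherwise minimal):

tg := api.NewTaskGroup("example", 1)
tg.Canonicalize("batch")
// tg.RestartPolicy.Attempts == 15 over a 7-day interval;
// with jobType "service" it would be 2 attempts per minute.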
@ -137,8 +194,24 @@ func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup {
// LogConfig provides configuration for log rotation
type LogConfig struct {
MaxFiles int
MaxFileSizeMB int
MaxFiles *int
MaxFileSizeMB *int
}
func DefaultLogConfig() *LogConfig {
return &LogConfig{
MaxFiles: helper.IntToPtr(10),
MaxFileSizeMB: helper.IntToPtr(10),
}
}
func (l *LogConfig) Canonicalize() {
if l.MaxFiles == nil {
l.MaxFiles = helper.IntToPtr(10)
}
if l.MaxFileSizeMB == nil {
l.MaxFileSizeMB = helper.IntToPtr(10)
}
}
// DispatchPayloadConfig configures how a task gets its input from a job dispatch
@ -166,28 +239,79 @@ type Task struct {
Leader *bool
}
func (t *Task) Canonicalize() {
if t.LogConfig == nil {
t.LogConfig = DefaultLogConfig()
} else {
t.LogConfig.Canonicalize()
}
if t.Vault != nil {
t.Vault.Canonicalize()
}
for _, artifact := range t.Artifacts {
artifact.Canonicalize()
}
for _, tmpl := range t.Templates {
tmpl.Canonicalize()
}
min := MinResources()
min.Merge(t.Resources)
t.Resources = min
}
// TaskArtifact is used to download artifacts before running a task.
type TaskArtifact struct {
GetterSource string
GetterSource *string
GetterOptions map[string]string
RelativeDest string
RelativeDest *string
}
func (a *TaskArtifact) Canonicalize() {
if a.RelativeDest == nil {
a.RelativeDest = helper.StringToPtr("local/")
}
}
type Template struct {
SourcePath string
DestPath string
EmbeddedTmpl string
ChangeMode string
ChangeSignal string
Splay time.Duration
Perms string
SourcePath *string
DestPath *string
EmbeddedTmpl *string
ChangeMode *string
ChangeSignal *string
Splay *time.Duration
Perms *string
}
func (tmpl *Template) Canonicalize() {
if tmpl.ChangeMode == nil {
tmpl.ChangeMode = helper.StringToPtr("restart")
}
if tmpl.Splay == nil {
tmpl.Splay = helper.TimeToPtr(5 * time.Second)
}
if tmpl.Perms == nil {
tmpl.Perms = helper.StringToPtr("0644")
}
}
type Vault struct {
Policies []string
Env bool
ChangeMode string
ChangeSignal string
Env *bool
ChangeMode *string
ChangeSignal *string
}
func (v *Vault) Canonicalize() {
if v.Env == nil {
v.Env = helper.BoolToPtr(true)
}
if v.ChangeMode == nil {
v.ChangeMode = helper.StringToPtr("restart")
}
if v.ChangeSignal == nil {
v.ChangeSignal = helper.StringToPtr("sighup")
}
}
// NewTask creates and initializes a new Task.

View File

@ -3,13 +3,15 @@ package api
import (
"reflect"
"testing"
"github.com/hashicorp/nomad/helper"
)
func TestTaskGroup_NewTaskGroup(t *testing.T) {
grp := NewTaskGroup("grp1", 2)
expect := &TaskGroup{
Name: "grp1",
Count: 2,
Name: helper.StringToPtr("grp1"),
Count: helper.IntToPtr(2),
}
if !reflect.DeepEqual(grp, expect) {
t.Fatalf("expect: %#v, got: %#v", expect, grp)
@ -162,10 +164,10 @@ func TestTask_Require(t *testing.T) {
// Create some require resources
resources := &Resources{
CPU: 1250,
MemoryMB: 128,
DiskMB: 2048,
IOPS: 500,
CPU: helper.IntToPtr(1250),
MemoryMB: helper.IntToPtr(128),
DiskMB: helper.IntToPtr(2048),
IOPS: helper.IntToPtr(500),
Networks: []*NetworkResource{
&NetworkResource{
CIDR: "0.0.0.0/0",

View File

@ -1,6 +1,10 @@
package api
import "testing"
import (
"testing"
"github.com/hashicorp/nomad/helper"
)
func assertQueryMeta(t *testing.T, qm *QueryMeta) {
if qm.LastIndex == 0 {
@ -21,19 +25,19 @@ func testJob() *Job {
task := NewTask("task1", "exec").
SetConfig("command", "/bin/sleep").
Require(&Resources{
CPU: 100,
MemoryMB: 256,
IOPS: 10,
CPU: helper.IntToPtr(100),
MemoryMB: helper.IntToPtr(256),
IOPS: helper.IntToPtr(10),
}).
SetLogConfig(&LogConfig{
MaxFiles: 1,
MaxFileSizeMB: 2,
MaxFiles: helper.IntToPtr(1),
MaxFileSizeMB: helper.IntToPtr(2),
})
group := NewTaskGroup("group1", 1).
AddTask(task).
RequireDisk(&EphemeralDisk{
SizeMB: 25,
SizeMB: helper.IntToPtr(25),
})
job := NewBatchJob("job1", "redis", "region1", 1).
@ -45,9 +49,9 @@ func testJob() *Job {
func testPeriodicJob() *Job {
job := testJob().AddPeriodicConfig(&PeriodicConfig{
Enabled: true,
Spec: "*/30 * * * *",
SpecType: "cron",
Enabled: helper.BoolToPtr(true),
Spec: helper.StringToPtr("*/30 * * * *"),
SpecType: helper.StringToPtr("cron"),
})
return job
}

View File

@ -165,6 +165,8 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
s.mux.HandleFunc("/v1/agent/servers", s.wrap(s.AgentServersRequest))
s.mux.HandleFunc("/v1/agent/keyring/", s.wrap(s.KeyringOperationRequest))
s.mux.HandleFunc("/v1/validate/job", s.wrap(s.ValidateJobRequest))
s.mux.HandleFunc("/v1/regions", s.wrap(s.RegionListRequest))
s.mux.HandleFunc("/v1/status/leader", s.wrap(s.StatusLeaderRequest))

View File

@ -6,6 +6,8 @@ import (
"strings"
"github.com/golang/snappy"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -111,6 +113,48 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
return out, nil
}
func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
// Ensure request method is POST or PUT
if !(req.Method == "POST" || req.Method == "PUT") {
return nil, CodedError(405, ErrInvalidMethod)
}
var validateRequest api.JobValidateRequest
if err := decodeBody(req, &validateRequest); err != nil {
return nil, CodedError(400, err.Error())
}
if validateRequest.Job == nil {
return nil, CodedError(400, "Job must be specified")
}
job := s.apiJobToStructJob(validateRequest.Job)
args := structs.JobValidateRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Region: validateRequest.Region,
},
}
s.parseRegion(req, &args.Region)
var out structs.JobValidateResponse
if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
// Fall back to local validation
args.Job.Canonicalize()
if vErr := args.Job.Validate(); vErr != nil {
if merr, ok := vErr.(*multierror.Error); ok {
for _, err := range merr.Errors {
out.ValidationErrors = append(out.ValidationErrors, err.Error())
}
}
} else {
out.ValidationErrors = append(out.ValidationErrors, vErr.Error())
}
}
return out, nil
}
func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
@ -310,3 +354,202 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job {
job.Canonicalize()
j := &structs.Job{
Region: *job.Region,
ID: *job.ID,
ParentID: *job.ParentID,
Name: *job.Name,
Type: *job.Type,
Priority: *job.Priority,
AllAtOnce: *job.AllAtOnce,
Datacenters: job.Datacenters,
Payload: job.Payload,
Meta: job.Meta,
VaultToken: *job.VaultToken,
Status: *job.Status,
StatusDescription: *job.StatusDescription,
CreateIndex: *job.CreateIndex,
ModifyIndex: *job.ModifyIndex,
JobModifyIndex: *job.JobModifyIndex,
}
j.Constraints = make([]*structs.Constraint, len(job.Constraints))
for i, c := range job.Constraints {
j.Constraints[i] = &structs.Constraint{
LTarget: c.LTarget,
RTarget: c.RTarget,
Operand: c.Operand,
}
}
if job.Update != nil {
j.Update = structs.UpdateStrategy{
Stagger: job.Update.Stagger,
MaxParallel: job.Update.MaxParallel,
}
}
if job.Periodic != nil {
j.Periodic = &structs.PeriodicConfig{
Enabled: *job.Periodic.Enabled,
Spec: *job.Periodic.Spec,
SpecType: *job.Periodic.SpecType,
ProhibitOverlap: *job.Periodic.ProhibitOverlap,
}
}
if job.ParameterizedJob != nil {
j.ParameterizedJob = &structs.ParameterizedJobConfig{
Payload: job.ParameterizedJob.Payload,
MetaRequired: job.ParameterizedJob.MetaRequired,
MetaOptional: job.ParameterizedJob.MetaOptional,
}
}
j.TaskGroups = make([]*structs.TaskGroup, len(job.TaskGroups))
for i, taskGroup := range job.TaskGroups {
tg := &structs.TaskGroup{}
s.apiTgToStructsTG(taskGroup, tg)
j.TaskGroups[i] = tg
}
return j
}
func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
tg.Name = *taskGroup.Name
tg.Count = *taskGroup.Count
tg.Meta = taskGroup.Meta
tg.Constraints = make([]*structs.Constraint, len(taskGroup.Constraints))
for k, constraint := range taskGroup.Constraints {
tg.Constraints[k] = &structs.Constraint{
LTarget: constraint.LTarget,
RTarget: constraint.RTarget,
Operand: constraint.Operand,
}
}
tg.RestartPolicy = &structs.RestartPolicy{
Attempts: taskGroup.RestartPolicy.Attempts,
Interval: taskGroup.RestartPolicy.Interval,
Delay: taskGroup.RestartPolicy.Delay,
Mode: taskGroup.RestartPolicy.Mode,
}
tg.EphemeralDisk = &structs.EphemeralDisk{
Sticky: *taskGroup.EphemeralDisk.Sticky,
SizeMB: *taskGroup.EphemeralDisk.SizeMB,
Migrate: *taskGroup.EphemeralDisk.Migrate,
}
tg.Meta = taskGroup.Meta
tg.Tasks = make([]*structs.Task, len(taskGroup.Tasks))
for l, task := range taskGroup.Tasks {
t := &structs.Task{}
s.apiTaskToStructsTask(task, t)
tg.Tasks[l] = t
}
}
func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
structsTask.Name = apiTask.Name
structsTask.Driver = apiTask.Driver
structsTask.User = apiTask.User
structsTask.Config = apiTask.Config
structsTask.Constraints = make([]*structs.Constraint, len(apiTask.Constraints))
for i, constraint := range apiTask.Constraints {
structsTask.Constraints[i] = &structs.Constraint{
LTarget: constraint.LTarget,
RTarget: constraint.RTarget,
Operand: constraint.Operand,
}
}
structsTask.Env = apiTask.Env
structsTask.Services = make([]*structs.Service, len(apiTask.Services))
for i, service := range apiTask.Services {
structsTask.Services[i] = &structs.Service{
Name: service.Name,
PortLabel: service.PortLabel,
Tags: service.Tags,
}
structsTask.Services[i].Checks = make([]*structs.ServiceCheck, len(service.Checks))
for j, check := range service.Checks {
structsTask.Services[i].Checks[j] = &structs.ServiceCheck{
Name: check.Name,
Type: check.Type,
Command: check.Command,
Args: check.Args,
Path: check.Path,
Protocol: check.Protocol,
PortLabel: check.PortLabel,
Interval: check.Interval,
Timeout: check.Timeout,
InitialStatus: check.InitialStatus,
}
}
}
structsTask.Resources = &structs.Resources{
CPU: *apiTask.Resources.CPU,
MemoryMB: *apiTask.Resources.MemoryMB,
IOPS: *apiTask.Resources.IOPS,
}
structsTask.Resources.Networks = make([]*structs.NetworkResource, len(apiTask.Resources.Networks))
for i, nw := range apiTask.Resources.Networks {
structsTask.Resources.Networks[i] = &structs.NetworkResource{
CIDR: nw.CIDR,
IP: nw.IP,
MBits: nw.MBits,
}
structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(nw.DynamicPorts))
structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(nw.ReservedPorts))
for j, dp := range nw.DynamicPorts {
structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{
Label: dp.Label,
Value: dp.Value,
}
}
for j, rp := range nw.ReservedPorts {
structsTask.Resources.Networks[i].ReservedPorts[j] = structs.Port{
Label: rp.Label,
Value: rp.Value,
}
}
}
structsTask.Meta = apiTask.Meta
structsTask.KillTimeout = apiTask.KillTimeout
structsTask.LogConfig = &structs.LogConfig{
MaxFiles: *apiTask.LogConfig.MaxFiles,
MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
}
structsTask.Artifacts = make([]*structs.TaskArtifact, len(apiTask.Artifacts))
for k, ta := range apiTask.Artifacts {
structsTask.Artifacts[k] = &structs.TaskArtifact{
GetterSource: *ta.GetterSource,
GetterOptions: ta.GetterOptions,
RelativeDest: *ta.RelativeDest,
}
}
if apiTask.Vault != nil {
structsTask.Vault = &structs.Vault{
Policies: apiTask.Vault.Policies,
Env: *apiTask.Vault.Env,
ChangeMode: *apiTask.Vault.ChangeMode,
ChangeSignal: *apiTask.Vault.ChangeSignal,
}
}
structsTask.Templates = make([]*structs.Template, len(apiTask.Templates))
for i, template := range apiTask.Templates {
structsTask.Templates[i] = &structs.Template{
SourcePath: *template.SourcePath,
DestPath: *template.DestPath,
EmbeddedTmpl: *template.EmbeddedTmpl,
ChangeMode: *template.ChangeMode,
ChangeSignal: *template.ChangeSignal,
Splay: *template.Splay,
Perms: *template.Perms,
}
}
if apiTask.DispatchPayload != nil {
structsTask.DispatchPayload = &structs.DispatchPayloadConfig{
File: apiTask.DispatchPayload.File,
}
}
}

View File

@ -410,8 +410,8 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str
}
// Display the rolled up stats. If possible prefer the live statistics
cpuUsage := strconv.Itoa(resource.CPU)
memUsage := humanize.IBytes(uint64(resource.MemoryMB * bytesPerMegabyte))
cpuUsage := strconv.Itoa(*resource.CPU)
memUsage := humanize.IBytes(uint64(*resource.MemoryMB * bytesPerMegabyte))
if stats != nil {
if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
if cs := ru.ResourceUsage.CpuStats; cs != nil {
@ -425,7 +425,7 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str
resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v|%v",
cpuUsage,
memUsage,
humanize.IBytes(uint64(resource.DiskMB*bytesPerMegabyte)),
humanize.IBytes(uint64(*resource.DiskMB*bytesPerMegabyte)),
*resource.IOPS,
firstAddr))
for i := 1; i < len(addr); i++ {

View File

@ -12,7 +12,6 @@ import (
gg "github.com/hashicorp/go-getter"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/jobspec"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/ryanuber/columnize"
)
@ -235,7 +234,7 @@ type JobGetter struct {
}
// StructJob returns the Job struct from jobfile.
func (j *JobGetter) StructJob(jpath string) (*structs.Job, error) {
func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) {
var jobfile io.Reader
switch jpath {
case "-":

View File

@ -169,7 +169,7 @@ func (l *LogsCommand) Run(args []string) int {
// Try to determine the tasks name from the allocation
var tasks []*api.Task
for _, tg := range alloc.Job.TaskGroups {
if tg.Name == alloc.TaskGroup {
if *tg.Name == alloc.TaskGroup {
if len(tg.Tasks) == 1 {
task = tg.Tasks[0].Name
break

View File

@ -11,6 +11,7 @@ import (
"github.com/mitchellh/colorstring"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
)
const (
@ -487,10 +488,10 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation,
// Get Resources
var cpu, mem, disk, iops int
for _, alloc := range runningAllocs {
cpu += alloc.Resources.CPU
mem += alloc.Resources.MemoryMB
disk += alloc.Resources.DiskMB
iops += alloc.Resources.IOPS
cpu += *alloc.Resources.CPU
mem += *alloc.Resources.MemoryMB
disk += *alloc.Resources.DiskMB
iops += *alloc.Resources.IOPS
}
resources := make([]string, 2)
@ -499,9 +500,9 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation,
cpu,
*total.CPU,
humanize.IBytes(uint64(mem*bytesPerMegabyte)),
humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)),
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)),
humanize.IBytes(uint64(disk*bytesPerMegabyte)),
humanize.IBytes(uint64(total.DiskMB*bytesPerMegabyte)),
humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)),
iops,
*total.IOPS)
@ -518,10 +519,10 @@ func computeNodeTotalResources(node *api.Node) api.Resources {
if res == nil {
res = &api.Resources{}
}
total.CPU = r.CPU - res.CPU
total.MemoryMB = r.MemoryMB - res.MemoryMB
total.DiskMB = r.DiskMB - res.DiskMB
total.IOPS = r.IOPS - res.IOPS
total.CPU = helper.IntToPtr(*r.CPU - *res.CPU)
total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB)
total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB)
total.IOPS = helper.IntToPtr(*r.IOPS - *res.IOPS)
return total
}
@ -550,7 +551,7 @@ func getActualResources(client *api.Client, runningAllocs []*api.Allocation, nod
math.Floor(cpu),
*total.CPU,
humanize.IBytes(mem),
humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)))
humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)))
return resources, nil
}

View File

@ -7,7 +7,6 @@ import (
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
"github.com/mitchellh/colorstring"
)
@ -99,28 +98,12 @@ func (c *PlanCommand) Run(args []string) int {
path := args[0]
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
job, err := c.JobGetter.ApiJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 255
}
// Initialize any fields that need to be.
job.Canonicalize()
// Check that the job is valid
if err := job.Validate(); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %s", err))
return 255
}
// Convert it to something we can use
apiJob, err := convertStructJob(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 255
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
@ -129,12 +112,12 @@ func (c *PlanCommand) Run(args []string) int {
}
// Force the region to be that of the job.
if r := job.Region; r != "" {
client.SetRegion(r)
if r := job.Region; r != nil {
client.SetRegion(*r)
}
// Submit the job
resp, _, err := client.Jobs().Plan(apiJob, diff, nil)
resp, _, err := client.Jobs().Plan(job, diff, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error during plan: %s", err))
return 255
@ -179,7 +162,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string {
}
// formatDryRun produces a string explaining the results of the dry run.
func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string {
func formatDryRun(resp *api.JobPlanResponse, job *api.Job) string {
var rolling *api.Evaluation
for _, eval := range resp.CreatedEvals {
if eval.TriggeredBy == "rolling-update" {
@ -192,7 +175,7 @@ func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string {
out = "[bold][green]- All tasks successfully allocated.[reset]\n"
} else {
// Change the output depending on if we are a system job or not
if job.Type == "system" {
if job.Type != nil && *job.Type == "system" {
out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n"
} else {
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -132,21 +133,24 @@ func (c *RunCommand) Run(args []string) int {
}
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
job, err := c.JobGetter.ApiJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 1
}
// Initialize any fields that need to be.
job.Canonicalize()
// Check that the job is valid
if err := job.Validate(); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %v", err))
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Force the region to be that of the job.
if r := job.Region; r != nil {
client.SetRegion(*r)
}
// Check if the job is periodic or is a parameterized job
periodic := job.IsPeriodic()
paramjob := job.IsParameterized()
@ -158,35 +162,24 @@ func (c *RunCommand) Run(args []string) int {
}
if vaultToken != "" {
job.VaultToken = vaultToken
}
// Convert it to something we can use
apiJob, err := convertStructJob(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 1
job.VaultToken = helper.StringToPtr(vaultToken)
}
// COMPAT 0.4.1 -> 0.5 Remove in 0.6
if apiJob.TaskGroups != nil {
OUTSIDE:
for _, tg := range apiJob.TaskGroups {
if tg.Tasks != nil {
for _, task := range tg.Tasks {
if task.Resources != nil {
if task.Resources.DiskMB > 0 {
c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html")
break OUTSIDE
}
}
OUTSIDE:
for _, tg := range job.TaskGroups {
for _, task := range tg.Tasks {
if task.Resources != nil {
if task.Resources.DiskMB != nil {
c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html")
break OUTSIDE
}
}
}
}
if output {
req := api.RegisterJobRequest{Job: apiJob}
req := api.RegisterJobRequest{Job: job}
buf, err := json.MarshalIndent(req, "", " ")
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
@ -197,18 +190,6 @@ func (c *RunCommand) Run(args []string) int {
return 0
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Force the region to be that of the job.
if r := job.Region; r != "" {
client.SetRegion(r)
}
// Parse the check-index
checkIndex, enforce, err := parseCheckIndex(checkIndexStr)
if err != nil {
@ -219,9 +200,9 @@ func (c *RunCommand) Run(args []string) int {
// Submit the job
var evalID string
if enforce {
evalID, _, err = client.Jobs().EnforceRegister(apiJob, checkIndex, nil)
evalID, _, err = client.Jobs().EnforceRegister(job, checkIndex, nil)
} else {
evalID, _, err = client.Jobs().Register(apiJob, nil)
evalID, _, err = client.Jobs().Register(job, nil)
}
if err != nil {
if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) {

View File

@ -215,7 +215,7 @@ func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) err
for _, child := range children {
// Ensure that we are only showing jobs whose parent is the requested
// job.
if child.ParentID != job.ID {
if child.ParentID != *job.ID {
continue
}
@ -262,7 +262,7 @@ func (c *StatusCommand) outputParameterizedInfo(client *api.Client, job *api.Job
for _, child := range children {
// Ensure that we are only showing jobs whose parent is the requested
// job.
if child.ParentID != job.ID {
if child.ParentID != *job.ID {
continue
}
@ -282,13 +282,13 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error {
var evals, allocs []string
// Query the allocations
jobAllocs, _, err := client.Jobs().Allocations(job.ID, c.allAllocs, nil)
jobAllocs, _, err := client.Jobs().Allocations(*job.ID, c.allAllocs, nil)
if err != nil {
return fmt.Errorf("Error querying job allocations: %s", err)
}
// Query the evaluations
jobEvals, _, err := client.Jobs().Evaluations(job.ID, nil)
jobEvals, _, err := client.Jobs().Evaluations(*job.ID, nil)
if err != nil {
return fmt.Errorf("Error querying job evaluations: %s", err)
}
@ -366,7 +366,7 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error {
// where appropriate
func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error {
// Query the summary
summary, _, err := client.Jobs().Summary(job.ID, nil)
summary, _, err := client.Jobs().Summary(*job.ID, nil)
if err != nil {
return fmt.Errorf("Error querying job summary: %s", err)
}

View File

@ -109,7 +109,7 @@ func (c *StopCommand) Run(args []string) int {
}
// Confirm the stop if the job was a prefix match.
if jobID != job.ID && !autoYes {
if jobID != *job.ID && !autoYes {
question := fmt.Sprintf("Are you sure you want to stop job %q? [y/N]", *job.ID)
answer, err := c.Ui.Ask(question)
if err != nil {
@ -132,7 +132,7 @@ func (c *StopCommand) Run(args []string) int {
}
// Invoke the stop
evalID, _, err := client.Jobs().Deregister(job.ID, nil)
evalID, _, err := client.Jobs().Deregister(*job.ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err))
return 1

View File

@ -43,17 +43,26 @@ func (c *ValidateCommand) Run(args []string) int {
}
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
job, err := c.JobGetter.ApiJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 1
}
// Initialize any fields that need to be.
job.Canonicalize()
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 255
}
// Force the region to be that of the job.
if r := job.Region; r != nil {
client.SetRegion(*r)
}
// Check that the job is valid
if err := job.Validate(); err != nil {
if _, _, err := client.Jobs().Validate(job, nil); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %s", err))
return 1
}

View File

@ -1,6 +1,9 @@
package helper
import "regexp"
import (
"regexp"
"time"
)
// validUUID is used to check if a given string looks like a UUID
var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`)
@ -20,6 +23,26 @@ func BoolToPtr(b bool) *bool {
return &b
}
// IntToPtr returns the pointer to an int
func IntToPtr(i int) *int {
return &i
}
// Uint64ToPtr returns the pointer to a uint64
func Uint64ToPtr(u uint64) *uint64 {
return &u
}
// StringToPtr returns the pointer to a string
func StringToPtr(str string) *string {
return &str
}
// TimeToPtr returns the pointer to a time.Duration
func TimeToPtr(t time.Duration) *time.Duration {
return &t
}
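These helpers exist because Go cannot take the address of a literal (&"global" does not compile), so callers construct the pointer-based api structs like this:

job := &api.Job{
	Region:    helper.StringToPtr("global"),
	Priority:  helper.IntToPtr(50),
	AllAtOnce: helper.BoolToPtr(false),
}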
// MapStringStringSliceValueSet returns the set of values in a map[string][]string
func MapStringStringSliceValueSet(m map[string][]string) []string {
set := make(map[string]struct{})

View File

@ -9,11 +9,14 @@ import (
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/client/driver"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/mapstructure"
)
@ -25,7 +28,7 @@ var errPortLabel = fmt.Errorf("Port label does not conform to naming requirement
//
// Due to current internal limitations, the entire contents of the
// io.Reader will be copied into memory first before parsing.
func Parse(r io.Reader) (*structs.Job, error) {
func Parse(r io.Reader) (*api.Job, error) {
// Copy the reader into an in-memory buffer first since HCL requires it.
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
@ -53,7 +56,7 @@ func Parse(r io.Reader) (*structs.Job, error) {
return nil, err
}
var job structs.Job
var job api.Job
// Parse the job out
matches := list.Filter("job")
@ -68,7 +71,7 @@ func Parse(r io.Reader) (*structs.Job, error) {
}
// ParseFile parses the given path as a job spec.
func ParseFile(path string) (*structs.Job, error) {
func ParseFile(path string) (*api.Job, error) {
path, err := filepath.Abs(path)
if err != nil {
return nil, err
@ -83,7 +86,7 @@ func ParseFile(path string) (*structs.Job, error) {
return Parse(f)
}
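With these signature changes, callers get an *api.Job straight from a jobfile. A usage sketch (the path is a hypothetical example):

job, err := jobspec.ParseFile("example.nomad")
if err != nil {
	log.Fatalf("parse failed: %v", err)
}
fmt.Printf("parsed job %s of type %s\n", *job.ID, *job.Type)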
func parseJob(result *structs.Job, list *ast.ObjectList) error {
func parseJob(result *api.Job, list *ast.ObjectList) error {
if len(list.Items) != 1 {
return fmt.Errorf("only one 'job' block allowed")
}
@ -108,13 +111,13 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
delete(m, "parameterized")
// Set the ID and name to the object key
result.ID = obj.Keys[0].Token.Value().(string)
result.ID = helper.StringToPtr(obj.Keys[0].Token.Value().(string))
result.Name = result.ID
// Defaults
result.Priority = 50
result.Region = "global"
result.Type = "service"
result.Priority = helper.IntToPtr(50)
result.Region = helper.StringToPtr("global")
result.Type = helper.StringToPtr("service")
// Decode the rest
if err := mapstructure.WeakDecode(m, result); err != nil {
@ -196,18 +199,20 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
// If we have tasks outside, create TaskGroups for them
if o := listVal.Filter("task"); len(o.Items) > 0 {
var tasks []*structs.Task
if err := parseTasks(result.Name, "", &tasks, o); err != nil {
var tasks []*api.Task
if err := parseTasks(*result.Name, "", &tasks, o); err != nil {
return multierror.Prefix(err, "task:")
}
result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
result.TaskGroups = make([]*api.TaskGroup, len(tasks), len(tasks)*2)
for i, t := range tasks {
result.TaskGroups[i] = &structs.TaskGroup{
Name: t.Name,
Count: 1,
EphemeralDisk: structs.DefaultEphemeralDisk(),
Tasks: []*structs.Task{t},
result.TaskGroups[i] = &api.TaskGroup{
Name: helper.StringToPtr(t.Name),
Count: helper.IntToPtr(1),
EphemeralDisk: &api.EphemeralDisk{
SizeMB: helper.IntToPtr(300),
},
Tasks: []*api.Task{t},
}
}
}
@ -221,7 +226,11 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
jobVault := structs.DefaultVaultBlock()
jobVault := &api.Vault{
Env: helper.BoolToPtr(true),
ChangeMode: helper.StringToPtr("restart"),
}
if err := parseVault(jobVault, o); err != nil {
return multierror.Prefix(err, "vault ->")
}
@ -239,14 +248,14 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
return nil
}
func parseGroups(result *structs.Job, list *ast.ObjectList) error {
func parseGroups(result *api.Job, list *ast.ObjectList) error {
list = list.Children()
if len(list.Items) == 0 {
return nil
}
// Go through each object and turn it into an actual result.
collection := make([]*structs.TaskGroup, 0, len(list.Items))
collection := make([]*api.TaskGroup, 0, len(list.Items))
seen := make(map[string]struct{})
for _, item := range list.Items {
n := item.Keys[0].Token.Value().(string)
@ -296,8 +305,8 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
}
// Build the group with the basic decode
var g structs.TaskGroup
g.Name = n
var g api.TaskGroup
g.Name = helper.StringToPtr(n)
if err := mapstructure.WeakDecode(m, &g); err != nil {
return err
}
@ -317,7 +326,9 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
}
// Parse ephemeral disk
g.EphemeralDisk = structs.DefaultEphemeralDisk()
g.EphemeralDisk = &api.EphemeralDisk{
SizeMB: helper.IntToPtr(300),
}
if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 {
if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n))
@ -340,14 +351,18 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
// Parse tasks
if o := listVal.Filter("task"); len(o.Items) > 0 {
if err := parseTasks(result.Name, g.Name, &g.Tasks, o); err != nil {
if err := parseTasks(*result.Name, *g.Name, &g.Tasks, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n))
}
}
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
tgVault := structs.DefaultVaultBlock()
tgVault := &api.Vault{
Env: helper.BoolToPtr(true),
ChangeMode: helper.StringToPtr("restart"),
}
if err := parseVault(tgVault, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
}
@ -367,7 +382,7 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
return nil
}
func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) error {
func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'restart' block allowed")
@ -392,7 +407,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err
return err
}
var result structs.RestartPolicy
var result api.RestartPolicy
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
@ -409,7 +424,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err
return nil
}
func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error {
func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error {
for _, o := range list.Elem().Items {
// Check for invalid keys
valid := []string{
@ -470,7 +485,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error
}
// Build the constraint
var c structs.Constraint
var c api.Constraint
if err := mapstructure.WeakDecode(m, &c); err != nil {
return err
}
@ -484,7 +499,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error
return nil
}
func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) error {
func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'ephemeral_disk' block allowed")
@ -508,7 +523,7 @@ func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) er
return err
}
var ephemeralDisk structs.EphemeralDisk
var ephemeralDisk api.EphemeralDisk
if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil {
return err
}
@ -534,7 +549,7 @@ func parseBool(value interface{}) (bool, error) {
return enabled, err
}
func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, list *ast.ObjectList) error {
func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list *ast.ObjectList) error {
list = list.Children()
if len(list.Items) == 0 {
return nil
@ -598,7 +613,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
delete(m, "vault")
// Build the task
var t structs.Task
var t api.Task
t.Name = n
if taskGroupName == "" {
taskGroupName = n
@ -688,7 +703,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
// If we have resources, then parse that
if o := listVal.Filter("resources"); len(o.Items) > 0 {
var r structs.Resources
var r api.Resources
if err := parseResources(&r, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s',", n))
}
@ -697,7 +712,11 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
}
// If we have logs then parse that
logConfig := structs.DefaultLogConfig()
logConfig := &api.LogConfig{
MaxFiles: helper.IntToPtr(10),
MaxFileSizeMB: helper.IntToPtr(10),
}
if o := listVal.Filter("logs"); len(o.Items) > 0 {
if len(o.Items) > 1 {
return fmt.Errorf("only one logs block is allowed in a Task. Number of logs block found: %d", len(o.Items))
@ -740,7 +759,11 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
v := structs.DefaultVaultBlock()
v := &api.Vault{
Env: helper.BoolToPtr(true),
ChangeMode: helper.StringToPtr("restart"),
}
if err := parseVault(v, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
}
@ -768,7 +791,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
return err
}
t.DispatchPayload = &structs.DispatchPayloadConfig{}
t.DispatchPayload = &api.DispatchPayloadConfig{}
if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil {
return err
}
@ -780,7 +803,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
return nil
}
func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error {
func parseArtifacts(result *[]*api.TaskArtifact, list *ast.ObjectList) error {
for _, o := range list.Elem().Items {
// Check for invalid keys
valid := []string{
@ -804,7 +827,7 @@ func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error
m["destination"] = "local/"
}
var ta structs.TaskArtifact
var ta api.TaskArtifact
if err := mapstructure.WeakDecode(m, &ta); err != nil {
return err
}
@ -851,7 +874,7 @@ func parseArtifactOption(result map[string]string, list *ast.ObjectList) error {
return nil
}
func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error {
func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error {
for _, o := range list.Elem().Items {
// Check for invalid keys
valid := []string{
@ -872,7 +895,12 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error {
return err
}
templ := structs.DefaultTemplate()
templ := &api.Template{
ChangeMode: helper.StringToPtr("restart"),
Splay: helper.TimeToPtr(5 * time.Second),
Perms: helper.StringToPtr("0644"),
}
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
@ -891,8 +919,8 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error {
return nil
}
func parseServices(jobName string, taskGroupName string, task *structs.Task, serviceObjs *ast.ObjectList) error {
task.Services = make([]*structs.Service, len(serviceObjs.Items))
func parseServices(jobName string, taskGroupName string, task *api.Task, serviceObjs *ast.ObjectList) error {
task.Services = make([]api.Service, len(serviceObjs.Items))
var defaultServiceName bool
for idx, o := range serviceObjs.Items {
// Check for invalid keys
@ -906,7 +934,7 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser
return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx))
}
var service structs.Service
var service api.Service
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
@ -941,14 +969,14 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser
}
}
task.Services[idx] = &service
task.Services[idx] = service
}
return nil
}
func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error {
service.Checks = make([]*structs.ServiceCheck, len(checkObjs.Items))
func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error {
service.Checks = make([]api.ServiceCheck, len(checkObjs.Items))
for idx, co := range checkObjs.Items {
// Check for invalid keys
valid := []string{
@ -967,7 +995,7 @@ func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error {
return multierror.Prefix(err, "check ->")
}
var check structs.ServiceCheck
var check api.ServiceCheck
var cm map[string]interface{}
if err := hcl.DecodeObject(&cm, co.Val); err != nil {
return err
@ -984,13 +1012,13 @@ func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error {
return err
}
service.Checks[idx] = &check
service.Checks[idx] = check
}
return nil
}
func parseResources(result *structs.Resources, list *ast.ObjectList) error {
func parseResources(result *api.Resources, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) == 0 {
return nil
@ -1047,7 +1075,7 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error {
return multierror.Prefix(err, "resources, network ->")
}
var r structs.NetworkResource
var r api.NetworkResource
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Items[0].Val); err != nil {
return err
@ -1066,17 +1094,17 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error {
return multierror.Prefix(err, "resources, network, ports ->")
}
result.Networks = []*structs.NetworkResource{&r}
result.Networks = []*api.NetworkResource{&r}
}
// Combine the parsed resources with a default resource block.
min := structs.DefaultResources()
min := api.MinResources()
min.Merge(result)
*result = *min
return nil
}
func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error {
func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error {
// Check for invalid keys
valid := []string{
"mbits",
@ -1101,7 +1129,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error {
return fmt.Errorf("found a port label collision: %s", label)
}
var p map[string]interface{}
var res structs.Port
var res api.Port
if err := hcl.DecodeObject(&p, port.Val); err != nil {
return err
}
@ -1119,7 +1147,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error {
return nil
}
func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error {
func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'update' block allowed per job")
@ -1153,7 +1181,7 @@ func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error {
return dec.Decode(m)
}
func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error {
func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'periodic' block allowed per job")
@ -1195,7 +1223,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error
}
// Build the constraint
var p structs.PeriodicConfig
var p api.PeriodicConfig
if err := mapstructure.WeakDecode(m, &p); err != nil {
return err
}
@ -1203,7 +1231,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error
return nil
}
func parseVault(result *structs.Vault, list *ast.ObjectList) error {
func parseVault(result *api.Vault, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) == 0 {
return nil
@ -1246,7 +1274,7 @@ func parseVault(result *structs.Vault, list *ast.ObjectList) error {
return nil
}
func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.ObjectList) error {
func parseParameterizedJob(result **api.ParameterizedJobConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'parameterized' block allowed per job")
@ -1271,7 +1299,7 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob
}
// Build the parameterized job block
var d structs.ParameterizedJobConfig
var d api.ParameterizedJobConfig
if err := mapstructure.WeakDecode(m, &d); err != nil {
return err
}

View File

@ -2,7 +2,6 @@ package jobspec
import (
"path/filepath"
"reflect"
"strings"
"testing"
"time"
@ -595,19 +594,11 @@ func TestParse(t *testing.T) {
continue
}
actual, err := ParseFile(path)
_, err = ParseFile(path)
if (err != nil) != tc.Err {
t.Fatalf("file: %s\n\n%s", tc.File, err)
continue
}
if !reflect.DeepEqual(actual, tc.Result) {
diff, err := actual.Diff(tc.Result, true)
if err == nil {
t.Logf("file %s diff:\n%#v\n", tc.File, diff)
}
t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result)
}
}
}

View File

@ -285,6 +285,25 @@ func (j *Job) Summary(args *structs.JobSummaryRequest,
return j.srv.blockingRPC(&opts)
}
// Validate validates a job
func (j *Job) Validate(args *structs.JobValidateRequest,
reply *structs.JobValidateResponse) error {
if err := validateJob(args.Job); err != nil {
if merr, ok := err.(*multierror.Error); ok {
for _, err := range merr.Errors {
reply.ValidationErrors = append(reply.ValidationErrors, err.Error())
}
} else {
reply.ValidationErrors = append(reply.ValidationErrors, err.Error())
}
}
reply.DriverConfigValidated = true
return nil
}
// Evaluate is used to force a job for re-evaluation
func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegisterResponse) error {
if done, err := j.srv.forward("Job.Evaluate", args, args, reply); done {

View File

@ -282,6 +282,22 @@ type JobDispatchRequest struct {
WriteRequest
}
// JobValidateRequest is used to validate a job
type JobValidateRequest struct {
Job *Job
WriteRequest
}
// JobValidateResponse is the response from a validate request
type JobValidateResponse struct {
// DriverConfigValidated indicates whether the agent validated the driver
// config
DriverConfigValidated bool
// ValidationErrors is a list of validation errors
ValidationErrors []string
}
// NodeListRequest is used to parameterize a list request
type NodeListRequest struct {
QueryOptions
@ -1205,6 +1221,7 @@ func (j *Job) Copy() *Job {
// Validate is used to sanity check a job input
func (j *Job) Validate() error {
var mErr multierror.Error
if j.Region == "" {
mErr.Errors = append(mErr.Errors, errors.New("Missing job region"))
}