diff --git a/.travis.yml b/.travis.yml
index 9a7d747a6..d89e20c07 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,7 +7,7 @@ services:
 
 language: go
 go:
-  - 1.7
+  - 1.8
 
 branches:
   only:
diff --git a/api/allocations_test.go b/api/allocations_test.go
index 597000472..22e5c1c9b 100644
--- a/api/allocations_test.go
+++ b/api/allocations_test.go
@@ -4,6 +4,8 @@ import (
   "reflect"
   "sort"
   "testing"
+
+  "github.com/hashicorp/nomad/helper"
 )
 
 func TestAllocations_List(t *testing.T) {
@@ -28,9 +30,9 @@ func TestAllocations_List(t *testing.T) {
   return
 
   job := &Job{
-    ID:   "job1",
-    Name: "Job #1",
-    Type: JobTypeService,
+    ID:   helper.StringToPtr("job1"),
+    Name: helper.StringToPtr("Job #1"),
+    Type: helper.StringToPtr(JobTypeService),
   }
   eval, _, err := c.Jobs().Register(job, nil)
   if err != nil {
@@ -74,10 +76,11 @@ func TestAllocations_PrefixList(t *testing.T) {
   return
 
   job := &Job{
-    ID:   "job1",
-    Name: "Job #1",
-    Type: JobTypeService,
+    ID:   helper.StringToPtr("job1"),
+    Name: helper.StringToPtr("Job #1"),
+    Type: helper.StringToPtr(JobTypeService),
   }
+
   eval, _, err := c.Jobs().Register(job, nil)
   if err != nil {
     t.Fatalf("err: %s", err)
diff --git a/api/compose_test.go b/api/compose_test.go
index 5f3ca68a5..ef24f2e99 100644
--- a/api/compose_test.go
+++ b/api/compose_test.go
@@ -3,6 +3,8 @@ package api
 import (
   "reflect"
   "testing"
+
+  "github.com/hashicorp/nomad/helper"
 )
 
 func TestCompose(t *testing.T) {
@@ -12,14 +14,14 @@ func TestCompose(t *testing.T) {
     SetMeta("foo", "bar").
     Constrain(NewConstraint("kernel.name", "=", "linux")).
     Require(&Resources{
-      CPU:      1250,
-      MemoryMB: 1024,
-      DiskMB:   2048,
-      IOPS:     500,
+      CPU:      helper.IntToPtr(1250),
+      MemoryMB: helper.IntToPtr(1024),
+      DiskMB:   helper.IntToPtr(2048),
+      IOPS:     helper.IntToPtr(500),
       Networks: []*NetworkResource{
         &NetworkResource{
           CIDR:  "0.0.0.0/0",
-          MBits: 100,
+          MBits: helper.IntToPtr(100),
           ReservedPorts: []Port{{"", 80}, {"", 443}},
         },
       },
@@ -40,11 +42,11 @@ func TestCompose(t *testing.T) {
 
   // Check that the composed result looks correct
   expect := &Job{
-    Region:   "region1",
-    ID:       "job1",
-    Name:     "myjob",
-    Type:     JobTypeService,
-    Priority: 2,
+    Region:   helper.StringToPtr("region1"),
+    ID:       helper.StringToPtr("job1"),
+    Name:     helper.StringToPtr("myjob"),
+    Type:     helper.StringToPtr(JobTypeService),
+    Priority: helper.IntToPtr(2),
     Datacenters: []string{
       "dc1",
     },
@@ -60,8 +62,8 @@ func TestCompose(t *testing.T) {
     },
     TaskGroups: []*TaskGroup{
       &TaskGroup{
-        Name:  "grp1",
-        Count: 2,
+        Name:  helper.StringToPtr("grp1"),
+        Count: helper.IntToPtr(2),
         Constraints: []*Constraint{
           &Constraint{
             LTarget: "kernel.name",
@@ -74,14 +76,14 @@ func TestCompose(t *testing.T) {
             Name:   "task1",
             Driver: "exec",
             Resources: &Resources{
-              CPU:      1250,
-              MemoryMB: 1024,
-              DiskMB:   2048,
-              IOPS:     500,
+              CPU:      helper.IntToPtr(1250),
+              MemoryMB: helper.IntToPtr(1024),
+              DiskMB:   helper.IntToPtr(2048),
+              IOPS:     helper.IntToPtr(500),
               Networks: []*NetworkResource{
                 &NetworkResource{
                   CIDR:  "0.0.0.0/0",
-                  MBits: 100,
+                  MBits: helper.IntToPtr(100),
                   ReservedPorts: []Port{
                     {"", 80},
                     {"", 443},
diff --git a/api/jobs.go b/api/jobs.go
index b678812c2..6f58c488e 100644
--- a/api/jobs.go
+++ b/api/jobs.go
@@ -6,6 +6,9 @@ import (
   "sort"
   "strconv"
   "time"
+
+  "github.com/gorhill/cronexpr"
+  "github.com/hashicorp/nomad/helper"
 )
 
 const (
@@ -14,6 +17,9 @@ const (
 
   // JobTypeBatch indicates a short-lived process
   JobTypeBatch = "batch"
+
+  // PeriodicSpecCron is used for a cron spec.
+  PeriodicSpecCron = "cron"
 )
 
 const (
@@ -32,6 +38,16 @@ func (c *Client) Jobs() *Jobs {
   return &Jobs{client: c}
 }
 
+// Validate is used to validate a job against the agent's validation endpoint.
+func (j *Jobs) Validate(job *Job, q *WriteOptions) (*JobValidateResponse, *WriteMeta, error) {
+  var resp JobValidateResponse
+  req := &JobValidateRequest{Job: job}
+  if q != nil {
+    req.WriteRequest = WriteRequest{Region: q.Region}
+  }
+  wm, err := j.client.write("/v1/validate/job", req, &resp, q)
+  return &resp, wm, err
+}
+
 // Register is used to register a new job. It returns the ID
 // of the evaluation, along with any errors encountered.
 func (j *Jobs) Register(job *Job, q *WriteOptions) (string, *WriteMeta, error) {
@@ -162,7 +178,7 @@ func (j *Jobs) Plan(job *Job, diff bool, q *WriteOptions) (*JobPlanResponse, *Wr
     Job:  job,
     Diff: diff,
   }
-  wm, err := j.client.write("/v1/job/"+job.ID+"/plan", req, &resp, q)
+  wm, err := j.client.write("/v1/job/"+*job.ID+"/plan", req, &resp, q)
   if err != nil {
     return nil, nil, err
   }
@@ -207,13 +223,42 @@ type UpdateStrategy struct {
 
 // PeriodicConfig is for serializing periodic config for a job.
 type PeriodicConfig struct {
-  Enabled         bool
-  Spec            string
-  SpecType        string
-  ProhibitOverlap bool
+  Enabled         *bool
+  Spec            *string
+  SpecType        *string
+  ProhibitOverlap *bool
   TimeZone        *string
 }
 
+func (p *PeriodicConfig) Canonicalize() {
+  if p.Enabled == nil {
+    p.Enabled = helper.BoolToPtr(true)
+  }
+  if p.SpecType == nil {
+    p.SpecType = helper.StringToPtr(PeriodicSpecCron)
+  }
+  if p.ProhibitOverlap == nil {
+    p.ProhibitOverlap = helper.BoolToPtr(false)
+  }
+  if p.TimeZone == nil || *p.TimeZone == "" {
+    p.TimeZone = helper.StringToPtr("UTC")
+  }
+}
+
+// Next returns the closest time instant matching the spec that is after the
+// passed time. If no matching instance exists, the zero value of time.Time is
+// returned. The `time.Location` of the returned value matches that of the
+// passed time.
+func (p *PeriodicConfig) Next(fromTime time.Time) time.Time {
+  if *p.SpecType == PeriodicSpecCron {
+    if e, err := cronexpr.Parse(*p.Spec); err == nil {
+      return e.Next(fromTime)
+    }
+  }
+
+  return time.Time{}
+}
+
 func (p *PeriodicConfig) GetLocation() (*time.Location, error) {
   if p.TimeZone == nil || *p.TimeZone == "" {
     return time.UTC, nil
@@ -231,13 +276,13 @@ type ParameterizedJobConfig struct {
 
 // Job is used to serialize a job.
 type Job struct {
-  Region            string
-  ID                string
-  ParentID          string
-  Name              string
-  Type              string
-  Priority          int
-  AllAtOnce         bool
+  Region            *string
+  ID                *string
+  ParentID          *string
+  Name              *string
+  Type              *string
+  Priority          *int
+  AllAtOnce         *bool
   Datacenters       []string
   Constraints       []*Constraint
   TaskGroups        []*TaskGroup
@@ -246,12 +291,71 @@ type Job struct {
   ParameterizedJob  *ParameterizedJobConfig
   Payload           []byte
   Meta              map[string]string
-  VaultToken        string
-  Status            string
-  StatusDescription string
-  CreateIndex       uint64
-  ModifyIndex       uint64
-  JobModifyIndex    uint64
+  VaultToken        *string
+  Status            *string
+  StatusDescription *string
+  CreateIndex       *uint64
+  ModifyIndex       *uint64
+  JobModifyIndex    *uint64
+}
+
+// IsPeriodic returns whether a job is periodic.
+func (j *Job) IsPeriodic() bool {
+  return j.Periodic != nil
+}
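Usage sketch (not part of the diff): exercising the new pointer-based PeriodicConfig above, assuming this branch's api and helper packages; the cron spec is illustrative.

package main

import (
  "fmt"
  "time"

  "github.com/hashicorp/nomad/api"
  "github.com/hashicorp/nomad/helper"
)

func main() {
  // Only Spec is set; Canonicalize fills Enabled, SpecType ("cron"),
  // ProhibitOverlap and TimeZone with their defaults.
  p := &api.PeriodicConfig{Spec: helper.StringToPtr("*/15 * * * *")}
  p.Canonicalize()
  // Next parses the cron spec; it returns the zero time.Time on a parse error.
  fmt.Println(p.Next(time.Now()))
}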
+
+// IsParameterized returns whether a job is a parameterized job.
+func (j *Job) IsParameterized() bool {
+  return j.ParameterizedJob != nil
+}
+
+func (j *Job) Canonicalize() {
+  if j.ID == nil {
+    j.ID = helper.StringToPtr("")
+  }
+  if j.Name == nil {
+    j.Name = j.ID
+  }
+  if j.ParentID == nil {
+    j.ParentID = helper.StringToPtr("")
+  }
+  if j.Priority == nil {
+    j.Priority = helper.IntToPtr(50)
+  }
+  if j.Region == nil {
+    j.Region = helper.StringToPtr("global")
+  }
+  if j.Type == nil {
+    j.Type = helper.StringToPtr("service")
+  }
+  if j.AllAtOnce == nil {
+    j.AllAtOnce = helper.BoolToPtr(false)
+  }
+  if j.VaultToken == nil {
+    j.VaultToken = helper.StringToPtr("")
+  }
+  if j.Status == nil {
+    j.Status = helper.StringToPtr("")
+  }
+  if j.StatusDescription == nil {
+    j.StatusDescription = helper.StringToPtr("")
+  }
+  if j.CreateIndex == nil {
+    j.CreateIndex = helper.Uint64ToPtr(0)
+  }
+  if j.ModifyIndex == nil {
+    j.ModifyIndex = helper.Uint64ToPtr(0)
+  }
+  if j.JobModifyIndex == nil {
+    j.JobModifyIndex = helper.Uint64ToPtr(0)
+  }
+  if j.Periodic != nil {
+    j.Periodic.Canonicalize()
+  }
+
+  for _, tg := range j.TaskGroups {
+    tg.Canonicalize(*j.Type)
+  }
 }
 
 // JobSummary summarizes the state of the allocations of a job
@@ -339,11 +443,11 @@ func NewBatchJob(id, name, region string, pri int) *Job {
 // newJob is used to create a new Job struct.
 func newJob(id, name, region, typ string, pri int) *Job {
   return &Job{
-    Region:   region,
-    ID:       id,
-    Name:     name,
-    Type:     typ,
-    Priority: pri,
+    Region:   &region,
+    ID:       &id,
+    Name:     &name,
+    Type:     &typ,
+    Priority: &pri,
   }
 }
 
@@ -380,6 +484,39 @@ func (j *Job) AddPeriodicConfig(cfg *PeriodicConfig) *Job {
   return j
 }
 
+type WriteRequest struct {
+  // The target region for this write
+  Region string
+}
+
+// JobValidateRequest is used to validate a job
+type JobValidateRequest struct {
+  Job *Job
+  WriteRequest
+}
+
+// JobValidateResponse is the response from validate request
+type JobValidateResponse struct {
+  // DriverConfigValidated indicates whether the agent validated the driver
+  // config
+  DriverConfigValidated bool
+
+  // ValidationErrors is a list of validation errors
+  ValidationErrors []string
+}
+
+// JobRegisterRequest is used to register a job
+type JobRegisterRequest struct {
+  Job *Job
+  // If EnforceIndex is set then the job will only be registered if the passed
+  // JobModifyIndex matches the current Jobs index. If the index is zero, the
+  // register only occurs if the job is new.
+  EnforceIndex   bool
+  JobModifyIndex uint64
+
+  WriteRequest
+}
+
 // RegisterJobRequest is used to serialize a job registration
 type RegisterJobRequest struct {
   Job *Job
@@ -400,6 +537,7 @@ type deregisterJobResponse struct {
 type JobPlanRequest struct {
   Job  *Job
   Diff bool
+  WriteRequest
 }
 
 type JobPlanResponse struct {
diff --git a/api/jobs_test.go b/api/jobs_test.go
index c0d429109..f1e24b7eb 100644
--- a/api/jobs_test.go
+++ b/api/jobs_test.go
@@ -5,7 +5,9 @@ import (
   "sort"
   "strings"
   "testing"
+  "time"
 
+  "github.com/hashicorp/nomad/helper"
   "github.com/hashicorp/nomad/testutil"
 )
 
@@ -45,11 +47,193 @@ func TestJobs_Register(t *testing.T) {
   assertQueryMeta(t, qm)
 
   // Check that we got the expected response
-  if len(resp) != 1 || resp[0].ID != job.ID {
+  if len(resp) != 1 || resp[0].ID != *job.ID {
     t.Fatalf("bad: %#v", resp[0])
   }
 }
 
+func TestJobs_Validate(t *testing.T) {
+  c, s := makeClient(t, nil, nil)
+  defer s.Stop()
+  jobs := c.Jobs()
+
+  // Create a job and attempt to register it
+  job := testJob()
+  resp, _, err := jobs.Validate(job, nil)
+  if err != nil {
+    t.Fatalf("err: %s", err)
+  }
+
+  if len(resp.ValidationErrors) != 0 {
+    t.Fatalf("bad %v", resp)
+  }
+
+  job.ID = nil
+  resp1, _, err := jobs.Validate(job, nil)
+  if err != nil {
+    t.Fatalf("err: %v", err)
+  }
+
+  if len(resp1.ValidationErrors) == 0 {
+    t.Fatalf("bad %v", resp1)
+  }
+}
+
+func TestJobs_Canonicalize(t *testing.T) {
+  testCases := []struct {
+    name     string
+    expected *Job
+    input    *Job
+  }{
+    {
+      name: "empty",
+      input: &Job{
+        TaskGroups: []*TaskGroup{
+          {
+            Tasks: []*Task{
+              {},
+            },
+          },
+        },
+      },
+      expected: &Job{
+        ID:                helper.StringToPtr(""),
+        Name:              helper.StringToPtr(""),
+        Region:            helper.StringToPtr("global"),
+        Type:              helper.StringToPtr("service"),
+        ParentID:          helper.StringToPtr(""),
+        Priority:          helper.IntToPtr(50),
+        AllAtOnce:         helper.BoolToPtr(false),
+        VaultToken:        helper.StringToPtr(""),
+        Status:            helper.StringToPtr(""),
+        StatusDescription: helper.StringToPtr(""),
+        CreateIndex:       helper.Uint64ToPtr(0),
+        ModifyIndex:       helper.Uint64ToPtr(0),
+        JobModifyIndex:    helper.Uint64ToPtr(0),
+        TaskGroups: []*TaskGroup{
+          {
+            Name:  helper.StringToPtr(""),
+            Count: helper.IntToPtr(1),
+            EphemeralDisk: &EphemeralDisk{
+              Sticky:  helper.BoolToPtr(false),
+              Migrate: helper.BoolToPtr(false),
+              SizeMB:  helper.IntToPtr(300),
+            },
+            RestartPolicy: &RestartPolicy{
+              Delay:    helper.TimeToPtr(15 * time.Second),
+              Attempts: helper.IntToPtr(2),
+              Interval: helper.TimeToPtr(1 * time.Minute),
+              Mode:     helper.StringToPtr("delay"),
+            },
+            Tasks: []*Task{
+              {
+                KillTimeout: helper.TimeToPtr(5 * time.Second),
+                LogConfig:   DefaultLogConfig(),
+                Resources:   MinResources(),
+              },
+            },
+          },
+        },
+      },
+    },
+    {
+      name: "partial",
+      input: &Job{
+        Name:     helper.StringToPtr("foo"),
+        ID:       helper.StringToPtr("bar"),
+        ParentID: helper.StringToPtr("lol"),
+        TaskGroups: []*TaskGroup{
+          {
+            Name: helper.StringToPtr("bar"),
+            Tasks: []*Task{
+              {
+                Name: "task1",
+              },
+            },
+          },
+        },
+      },
+      expected: &Job{
+        ID:                helper.StringToPtr("bar"),
+        Name:              helper.StringToPtr("foo"),
+        Region:            helper.StringToPtr("global"),
+        Type:              helper.StringToPtr("service"),
+        ParentID:          helper.StringToPtr("lol"),
+        Priority:          helper.IntToPtr(50),
+        AllAtOnce:         helper.BoolToPtr(false),
+        VaultToken:        helper.StringToPtr(""),
+        Status:            helper.StringToPtr(""),
+        StatusDescription: helper.StringToPtr(""),
+        CreateIndex:       helper.Uint64ToPtr(0),
+        ModifyIndex:       helper.Uint64ToPtr(0),
+        JobModifyIndex:    helper.Uint64ToPtr(0),
+        TaskGroups: []*TaskGroup{
+          {
+            Name:  helper.StringToPtr("bar"),
+            Count: helper.IntToPtr(1),
+            EphemeralDisk: &EphemeralDisk{
+              Sticky:  helper.BoolToPtr(false),
+              Migrate: helper.BoolToPtr(false),
+              SizeMB:  helper.IntToPtr(300),
+            },
+            RestartPolicy: &RestartPolicy{
+              Delay:    helper.TimeToPtr(15 * time.Second),
+              Attempts: helper.IntToPtr(2),
+              Interval: helper.TimeToPtr(1 * time.Minute),
+              Mode:     helper.StringToPtr("delay"),
+            },
+            Tasks: []*Task{
+              {
+                Name:        "task1",
+                LogConfig:   DefaultLogConfig(),
+                Resources:   MinResources(),
+                KillTimeout: helper.TimeToPtr(5 * time.Second),
+              },
+            },
+          },
+        },
+      },
+    },
+    {
+      name: "periodic",
+      input: &Job{
+        ID:       helper.StringToPtr("bar"),
+        Periodic: &PeriodicConfig{},
+      },
+      expected: &Job{
+        ID:                helper.StringToPtr("bar"),
+        ParentID:          helper.StringToPtr(""),
+        Name:              helper.StringToPtr("bar"),
+        Region:            helper.StringToPtr("global"),
+        Type:              helper.StringToPtr("service"),
+        Priority:          helper.IntToPtr(50),
+        AllAtOnce:         helper.BoolToPtr(false),
+        VaultToken:        helper.StringToPtr(""),
+        Status:            helper.StringToPtr(""),
+        StatusDescription: helper.StringToPtr(""),
+        CreateIndex:       helper.Uint64ToPtr(0),
+        ModifyIndex:       helper.Uint64ToPtr(0),
+        JobModifyIndex:    helper.Uint64ToPtr(0),
+        Periodic: &PeriodicConfig{
+          Enabled:         helper.BoolToPtr(true),
+          SpecType:        helper.StringToPtr(PeriodicSpecCron),
+          ProhibitOverlap: helper.BoolToPtr(false),
+          TimeZone:        helper.StringToPtr("UTC"),
+        },
+      },
+    },
+  }
+
+  for _, tc := range testCases {
+    t.Run(tc.name, func(t *testing.T) {
+      tc.input.Canonicalize()
+      if !reflect.DeepEqual(tc.input, tc.expected) {
+        t.Fatalf("Name: %v, expected:\n%#v\nactual:\n%#v", tc.name, tc.expected, tc.input)
+      }
+    })
+  }
+}
+
 func TestJobs_EnforceRegister(t *testing.T) {
   c, s := makeClient(t, nil, nil)
   defer s.Stop()
@@ -96,7 +280,7 @@ func TestJobs_EnforceRegister(t *testing.T) {
     t.Fatalf("bad length: %d", len(resp))
   }
 
-  if resp[0].ID != job.ID {
+  if resp[0].ID != *job.ID {
     t.Fatalf("bad: %#v", resp[0])
   }
   curIndex := resp[0].JobModifyIndex
@@ -146,7 +330,7 @@ func TestJobs_Info(t *testing.T) {
   assertQueryMeta(t, qm)
 
   // Check that the result is what we expect
-  if result == nil || result.ID != job.ID {
+  if result == nil || *result.ID != *job.ID {
     t.Fatalf("expect: %#v, got: %#v", job, result)
   }
 }
@@ -178,13 +362,13 @@ func TestJobs_PrefixList(t *testing.T) {
 
   // Query the job again and ensure it exists
   // Listing when nothing exists returns empty
-  results, qm, err = jobs.PrefixList(job.ID[:1])
+  results, qm, err = jobs.PrefixList((*job.ID)[:1])
   if err != nil {
     t.Fatalf("err: %s", err)
   }
 
   // Check if we have the right list
-  if len(results) != 1 || results[0].ID != job.ID {
+  if len(results) != 1 || results[0].ID != *job.ID {
     t.Fatalf("bad: %#v", results)
   }
 }
@@ -222,7 +406,7 @@ func TestJobs_List(t *testing.T) {
   }
 
   // Check if we have the right list
-  if len(results) != 1 || results[0].ID != job.ID {
+  if len(results) != 1 || results[0].ID != *job.ID {
     t.Fatalf("bad: %#v", results)
   }
 }
@@ -387,8 +571,8 @@ func TestJobs_PeriodicForce(t *testing.T) {
   }
 
   testutil.WaitForResult(func() (bool, error) {
-    out, _, err := jobs.Info(job.ID, nil)
-    if err != nil || out == nil || out.ID != job.ID {
+    out, _, err := jobs.Info(*job.ID, nil)
+    if err != nil || out == nil || *out.ID != *job.ID {
       return false, err
     }
     return true, nil
@@ -397,7 +581,7 @@
   })
 
   // Try force again
-  evalID, wm, err := jobs.PeriodicForce(job.ID, nil)
+  evalID, wm, err := jobs.PeriodicForce(*job.ID, nil)
   if err != nil {
     t.Fatalf("err: %s", err)
   }
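A hedged sketch of calling the new Validate endpoint through the client, as the tests above do; the agent address and job ID are hypothetical.

package main

import (
  "log"

  "github.com/hashicorp/nomad/api"
)

func main() {
  client, err := api.NewClient(api.DefaultConfig()) // assumes a local agent
  if err != nil {
    log.Fatal(err)
  }
  job := api.NewServiceJob("example", "example", "global", 50) // hypothetical job
  resp, _, err := client.Jobs().Validate(job, nil)
  if err != nil {
    log.Fatal(err)
  }
  for _, e := range resp.ValidationErrors {
    log.Println("validation error:", e)
  }
}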
@@ -519,10 +703,10 @@ func TestJobs_JobSummary(t *testing.T) {
   assertQueryMeta(t, qm)
 
   // Check that the result is what we expect
-  if job.ID != result.JobID {
-    t.Fatalf("err: expected job id of %s saw %s", job.ID, result.JobID)
-  }
-  if _, ok := result.Summary[taskName]; !ok {
-    t.Fatalf("err: unable to find %s key in job summary", taskName)
-  }
+  if *job.ID != result.JobID {
+    t.Fatalf("err: expected job id of %s saw %s", *job.ID, result.JobID)
+  }
+  if _, ok := result.Summary[*taskName]; !ok {
+    t.Fatalf("err: unable to find %s key in job summary", *taskName)
+  }
 }
 
@@ -530,11 +714,11 @@ func TestJobs_NewBatchJob(t *testing.T) {
   job := NewBatchJob("job1", "myjob", "region1", 5)
   expect := &Job{
-    Region:   "region1",
-    ID:       "job1",
-    Name:     "myjob",
-    Type:     JobTypeBatch,
-    Priority: 5,
+    Region:   helper.StringToPtr("region1"),
+    ID:       helper.StringToPtr("job1"),
+    Name:     helper.StringToPtr("myjob"),
+    Type:     helper.StringToPtr(JobTypeBatch),
+    Priority: helper.IntToPtr(5),
   }
   if !reflect.DeepEqual(job, expect) {
     t.Fatalf("expect: %#v, got: %#v", expect, job)
@@ -544,11 +728,11 @@ func TestJobs_NewServiceJob(t *testing.T) {
   job := NewServiceJob("job1", "myjob", "region1", 5)
   expect := &Job{
-    Region:   "region1",
-    ID:       "job1",
-    Name:     "myjob",
-    Type:     JobTypeService,
-    Priority: 5,
+    Region:   helper.StringToPtr("region1"),
+    ID:       helper.StringToPtr("job1"),
+    Name:     helper.StringToPtr("myjob"),
+    Type:     helper.StringToPtr(JobTypeService),
+    Priority: helper.IntToPtr(5),
   }
   if !reflect.DeepEqual(job, expect) {
     t.Fatalf("expect: %#v, got: %#v", expect, job)
diff --git a/api/jobs_testing.go b/api/jobs_testing.go
new file mode 100644
index 000000000..c27de7d38
--- /dev/null
+++ b/api/jobs_testing.go
@@ -0,0 +1,110 @@
+package api
+
+import (
+  "time"
+
+  "github.com/hashicorp/nomad/helper"
+  "github.com/hashicorp/nomad/nomad/structs"
+)
+
+func MockJob() *Job {
+  job := &Job{
+    Region:      helper.StringToPtr("global"),
+    ID:          helper.StringToPtr(structs.GenerateUUID()),
+    Name:        helper.StringToPtr("my-job"),
+    Type:        helper.StringToPtr("service"),
+    Priority:    helper.IntToPtr(50),
+    AllAtOnce:   helper.BoolToPtr(false),
+    Datacenters: []string{"dc1"},
+    Constraints: []*Constraint{
+      &Constraint{
+        LTarget: "${attr.kernel.name}",
+        RTarget: "linux",
+        Operand: "=",
+      },
+    },
+    TaskGroups: []*TaskGroup{
+      &TaskGroup{
+        Name:  helper.StringToPtr("web"),
+        Count: helper.IntToPtr(10),
+        EphemeralDisk: &EphemeralDisk{
+          SizeMB: helper.IntToPtr(150),
+        },
+        RestartPolicy: &RestartPolicy{
+          Attempts: helper.IntToPtr(3),
+          Interval: helper.TimeToPtr(10 * time.Minute),
+          Delay:    helper.TimeToPtr(1 * time.Minute),
+          Mode:     helper.StringToPtr("delay"),
+        },
+        Tasks: []*Task{
+          &Task{
+            Name:   "web",
+            Driver: "exec",
+            Config: map[string]interface{}{
+              "command": "/bin/date",
+            },
+            Env: map[string]string{
+              "FOO": "bar",
+            },
+            Services: []Service{
+              {
+                Name:      "${TASK}-frontend",
+                PortLabel: "http",
+                Tags:      []string{"pci:${meta.pci-dss}", "datacenter:${node.datacenter}"},
+                Checks: []ServiceCheck{
+                  {
+                    Name:     "check-table",
+                    Type:     "script",
+                    Command:  "/usr/local/check-table-${meta.database}",
+                    Args:     []string{"${meta.version}"},
+                    Interval: 30 * time.Second,
+                    Timeout:  5 * time.Second,
+                  },
+                },
+              },
+              {
+                Name:      "${TASK}-admin",
+                PortLabel: "admin",
+              },
+            },
+            LogConfig: DefaultLogConfig(),
+            Resources: &Resources{
+              CPU:      helper.IntToPtr(500),
+              MemoryMB: helper.IntToPtr(256),
+              Networks: []*NetworkResource{
+                &NetworkResource{
+                  MBits:        helper.IntToPtr(50),
+                  DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
+                },
+              },
+            },
+            Meta: map[string]string{
+              "foo": "bar",
+            },
+          },
+        },
+        Meta: map[string]string{
+          "elb_check_type":     "http",
+          "elb_check_interval": "30s",
+          "elb_check_min":      "3",
+        },
+      },
+    },
+    Meta: map[string]string{
+      "owner": "armon",
+    },
+  }
+  job.Canonicalize()
+  return job
+}
+
+func MockPeriodicJob() *Job {
+  j := MockJob()
+  j.Type = helper.StringToPtr("batch")
+  j.Periodic = &PeriodicConfig{
+    Enabled:  helper.BoolToPtr(true),
+    SpecType: helper.StringToPtr("cron"),
+    Spec:     helper.StringToPtr("*/30 * * * *"),
+  }
+  return j
+}
diff --git a/api/resources.go b/api/resources.go
index 8cb2505ba..96ef5967a 100644
--- a/api/resources.go
+++ b/api/resources.go
@@ -1,15 +1,54 @@
 package api
 
+import "github.com/hashicorp/nomad/helper"
+
 // Resources encapsulates the required resources of
 // a given task or task group.
 type Resources struct {
-  CPU      int
-  MemoryMB int
-  DiskMB   int
-  IOPS     int
+  CPU      *int
+  MemoryMB *int
+  DiskMB   *int
+  IOPS     *int
   Networks []*NetworkResource
 }
 
+func (r *Resources) Canonicalize() {
+  for _, n := range r.Networks {
+    n.Canonicalize()
+  }
+}
+
+func MinResources() *Resources {
+  return &Resources{
+    CPU:      helper.IntToPtr(100),
+    MemoryMB: helper.IntToPtr(10),
+    IOPS:     helper.IntToPtr(0),
+  }
+}
+
+// Merge merges this resource with another resource.
+func (r *Resources) Merge(other *Resources) {
+  if other == nil {
+    return
+  }
+  if other.CPU != nil {
+    r.CPU = other.CPU
+  }
+  if other.MemoryMB != nil {
+    r.MemoryMB = other.MemoryMB
+  }
+  if other.DiskMB != nil {
+    r.DiskMB = other.DiskMB
+  }
+  if other.IOPS != nil {
+    r.IOPS = other.IOPS
+  }
+  if len(other.Networks) != 0 {
+    r.Networks = other.Networks
+  }
+}
+
 type Port struct {
   Label string
   Value int
@@ -23,5 +62,11 @@ type NetworkResource struct {
   ReservedPorts []Port
   DynamicPorts  []Port
   IP            string
-  MBits         int
+  MBits         *int
+}
+
+func (n *NetworkResource) Canonicalize() {
+  if n.MBits == nil {
+    n.MBits = helper.IntToPtr(10)
+  }
 }
diff --git a/api/tasks.go b/api/tasks.go
index d64bc7bb2..cc38a3001 100644
--- a/api/tasks.go
+++ b/api/tasks.go
@@ -1,7 +1,10 @@
 package api
 
 import (
+  "strings"
   "time"
+
+  "github.com/hashicorp/nomad/helper"
 )
 
 // MemoryStats holds memory usage related stats
@@ -51,10 +54,25 @@ type AllocResourceUsage struct {
 // RestartPolicy defines how the Nomad client restarts
 // tasks in a taskgroup when they fail
 type RestartPolicy struct {
-  Interval time.Duration
-  Attempts int
-  Delay    time.Duration
-  Mode     string
+  Interval *time.Duration
+  Attempts *int
+  Delay    *time.Duration
+  Mode     *string
+}
+
+func (r *RestartPolicy) Merge(rp *RestartPolicy) {
+  if rp.Interval != nil {
+    r.Interval = rp.Interval
+  }
+  if rp.Attempts != nil {
+    r.Attempts = rp.Attempts
+  }
+  if rp.Delay != nil {
+    r.Delay = rp.Delay
+  }
+  if rp.Mode != nil {
+    r.Mode = rp.Mode
+  }
 }
 
 // The ServiceCheck data model represents the consul health check that
@@ -84,15 +102,35 @@ type Service struct {
 
 // EphemeralDisk is an ephemeral disk object
 type EphemeralDisk struct {
-  Sticky  bool
-  Migrate bool
-  SizeMB  int `mapstructure:"size"`
+  Sticky  *bool
+  Migrate *bool
+  SizeMB  *int `mapstructure:"size"`
+}
+
+func DefaultEphemeralDisk() *EphemeralDisk {
+  return &EphemeralDisk{
+    Sticky:  helper.BoolToPtr(false),
+    Migrate: helper.BoolToPtr(false),
+    SizeMB:  helper.IntToPtr(300),
+  }
+}
+
+func (e *EphemeralDisk) Canonicalize() {
+  if e.Sticky == nil {
+    e.Sticky = helper.BoolToPtr(false)
+  }
+  if e.Migrate == nil {
+    e.Migrate = helper.BoolToPtr(false)
+  }
+  if e.SizeMB == nil {
+    e.SizeMB = helper.IntToPtr(300)
+  }
 }
 
 // TaskGroup is the unit of scheduling.
 type TaskGroup struct {
-  Name  string
-  Count int
+  Name  *string
+  Count *int
   Constraints   []*Constraint
   Tasks         []*Task
   RestartPolicy *RestartPolicy
@@ -103,11 +141,51 @@ type TaskGroup struct {
 
 // NewTaskGroup creates a new TaskGroup.
 func NewTaskGroup(name string, count int) *TaskGroup {
   return &TaskGroup{
-    Name:  name,
-    Count: count,
+    Name:  helper.StringToPtr(name),
+    Count: helper.IntToPtr(count),
   }
 }
 
+func (g *TaskGroup) Canonicalize(jobType string) {
+  if g.Name == nil {
+    g.Name = helper.StringToPtr("")
+  }
+  if g.Count == nil {
+    g.Count = helper.IntToPtr(1)
+  }
+  for _, t := range g.Tasks {
+    t.Canonicalize()
+  }
+  if g.EphemeralDisk == nil {
+    g.EphemeralDisk = DefaultEphemeralDisk()
+  } else {
+    g.EphemeralDisk.Canonicalize()
+  }
+
+  var defaultRestartPolicy *RestartPolicy
+  switch jobType {
+  case "service", "system":
+    defaultRestartPolicy = &RestartPolicy{
+      Delay:    helper.TimeToPtr(15 * time.Second),
+      Attempts: helper.IntToPtr(2),
+      Interval: helper.TimeToPtr(1 * time.Minute),
+      Mode:     helper.StringToPtr("delay"),
+    }
+  default:
+    defaultRestartPolicy = &RestartPolicy{
+      Delay:    helper.TimeToPtr(15 * time.Second),
+      Attempts: helper.IntToPtr(15),
+      Interval: helper.TimeToPtr(7 * 24 * time.Hour),
+      Mode:     helper.StringToPtr("delay"),
+    }
+  }
+
+  if g.RestartPolicy != nil {
+    defaultRestartPolicy.Merge(g.RestartPolicy)
+  }
+  g.RestartPolicy = defaultRestartPolicy
+}
+
 // Constrain is used to add a constraint to a task group.
 func (g *TaskGroup) Constrain(c *Constraint) *TaskGroup {
   g.Constraints = append(g.Constraints, c)
@@ -137,8 +215,24 @@ func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup {
 
 // LogConfig provides configuration for log rotation
 type LogConfig struct {
-  MaxFiles      int
-  MaxFileSizeMB int
+  MaxFiles      *int
+  MaxFileSizeMB *int
+}
+
+func DefaultLogConfig() *LogConfig {
+  return &LogConfig{
+    MaxFiles:      helper.IntToPtr(10),
+    MaxFileSizeMB: helper.IntToPtr(10),
+  }
+}
+
+func (l *LogConfig) Canonicalize() {
+  if l.MaxFiles == nil {
+    l.MaxFiles = helper.IntToPtr(10)
+  }
+  if l.MaxFileSizeMB == nil {
+    l.MaxFileSizeMB = helper.IntToPtr(10)
+  }
 }
 
 // DispatchPayloadConfig configures how a task gets its input from a job dispatch
@@ -157,7 +251,7 @@ type Task struct {
   Services  []Service
   Resources *Resources
   Meta      map[string]string
-  KillTimeout time.Duration
+  KillTimeout *time.Duration
   LogConfig   *LogConfig
   Artifacts   []*TaskArtifact
   Vault       *Vault
@@ -166,28 +260,91 @@ type Task struct {
   Leader *bool
 }
 
+func (t *Task) Canonicalize() {
+  if t.LogConfig == nil {
+    t.LogConfig = DefaultLogConfig()
+  } else {
+    t.LogConfig.Canonicalize()
+  }
+  if t.Vault != nil {
+    t.Vault.Canonicalize()
+  }
+  for _, artifact := range t.Artifacts {
+    artifact.Canonicalize()
+  }
+  for _, tmpl := range t.Templates {
+    tmpl.Canonicalize()
+  }
+
+  if t.KillTimeout == nil {
+    t.KillTimeout = helper.TimeToPtr(5 * time.Second)
+  }
+
+  min := MinResources()
+  min.Merge(t.Resources)
+  min.Canonicalize()
+  t.Resources = min
+}
+
 // TaskArtifact is used to download artifacts before running a task.
 type TaskArtifact struct {
-  GetterSource  string
+  GetterSource  *string
   GetterOptions map[string]string
-  RelativeDest  string
+  RelativeDest  *string
+}
+
+func (a *TaskArtifact) Canonicalize() {
+  if a.RelativeDest == nil {
+    a.RelativeDest = helper.StringToPtr("local/")
+  }
 }
 
 type Template struct {
-  SourcePath   string
-  DestPath     string
-  EmbeddedTmpl string
-  ChangeMode   string
-  ChangeSignal string
-  Splay        time.Duration
-  Perms        string
+  SourcePath   *string
+  DestPath     *string
+  EmbeddedTmpl *string
+  ChangeMode   *string
+  ChangeSignal *string
+  Splay        *time.Duration
+  Perms        *string
+}
+
+func (tmpl *Template) Canonicalize() {
+  if tmpl.ChangeMode == nil {
+    tmpl.ChangeMode = helper.StringToPtr("restart")
+  }
+  if tmpl.Splay == nil {
+    tmpl.Splay = helper.TimeToPtr(5 * time.Second)
+  }
+  if tmpl.Perms == nil {
+    tmpl.Perms = helper.StringToPtr("0644")
+  }
+  if *tmpl.ChangeMode == "signal" && tmpl.ChangeSignal == nil {
+    tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
+  }
+  if tmpl.ChangeSignal != nil {
+    sig := *tmpl.ChangeSignal
+    tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
+  }
 }
 
 type Vault struct {
   Policies     []string
-  Env          bool
-  ChangeMode   string
-  ChangeSignal string
+  Env          *bool
+  ChangeMode   *string
+  ChangeSignal *string
+}
+
+func (v *Vault) Canonicalize() {
+  if v.Env == nil {
+    v.Env = helper.BoolToPtr(true)
+  }
+  if v.ChangeMode == nil {
+    v.ChangeMode = helper.StringToPtr("restart")
+  }
+  if v.ChangeSignal == nil {
+    v.ChangeSignal = helper.StringToPtr("SIGHUP")
+  }
 }
 
 // NewTask creates and initializes a new Task.
diff --git a/api/tasks_test.go b/api/tasks_test.go
index bbdf12550..7756bfe52 100644
--- a/api/tasks_test.go
+++ b/api/tasks_test.go
@@ -3,13 +3,15 @@ package api
 import (
   "reflect"
   "testing"
+
+  "github.com/hashicorp/nomad/helper"
 )
 
 func TestTaskGroup_NewTaskGroup(t *testing.T) {
   grp := NewTaskGroup("grp1", 2)
   expect := &TaskGroup{
-    Name:  "grp1",
-    Count: 2,
+    Name:  helper.StringToPtr("grp1"),
+    Count: helper.IntToPtr(2),
   }
   if !reflect.DeepEqual(grp, expect) {
     t.Fatalf("expect: %#v, got: %#v", expect, grp)
@@ -162,14 +164,14 @@ func TestTask_Require(t *testing.T) {
 
   // Create some require resources
   resources := &Resources{
-    CPU:      1250,
-    MemoryMB: 128,
-    DiskMB:   2048,
-    IOPS:     500,
+    CPU:      helper.IntToPtr(1250),
+    MemoryMB: helper.IntToPtr(128),
+    DiskMB:   helper.IntToPtr(2048),
+    IOPS:     helper.IntToPtr(500),
     Networks: []*NetworkResource{
       &NetworkResource{
         CIDR:  "0.0.0.0/0",
-        MBits: 100,
+        MBits: helper.IntToPtr(100),
         ReservedPorts: []Port{{"", 80}, {"", 443}},
       },
     },
diff --git a/api/util_test.go b/api/util_test.go
index aaca7487e..bf1c0556c 100644
--- a/api/util_test.go
+++ b/api/util_test.go
@@ -1,6 +1,10 @@
 package api
 
-import "testing"
+import (
+  "testing"
+
+  "github.com/hashicorp/nomad/helper"
+)
 
 func assertQueryMeta(t *testing.T, qm *QueryMeta) {
   if qm.LastIndex == 0 {
@@ -21,19 +25,19 @@ func testJob() *Job {
   task := NewTask("task1", "exec").
     SetConfig("command", "/bin/sleep").
     Require(&Resources{
-      CPU:      100,
-      MemoryMB: 256,
-      IOPS:     10,
+      CPU:      helper.IntToPtr(100),
+      MemoryMB: helper.IntToPtr(256),
+      IOPS:     helper.IntToPtr(10),
     }).
     SetLogConfig(&LogConfig{
-      MaxFiles:      1,
-      MaxFileSizeMB: 2,
+      MaxFiles:      helper.IntToPtr(1),
+      MaxFileSizeMB: helper.IntToPtr(2),
     })
 
   group := NewTaskGroup("group1", 1).
     AddTask(task).
     RequireDisk(&EphemeralDisk{
-      SizeMB: 25,
+      SizeMB: helper.IntToPtr(25),
     })
 
   job := NewBatchJob("job1", "redis", "region1", 1).
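Sketch of how TaskGroup.Canonicalize picks a restart policy by job type, as implemented above; the group name is hypothetical.

package main

import (
  "fmt"

  "github.com/hashicorp/nomad/api"
)

func main() {
  tg := api.NewTaskGroup("cache", 1)
  tg.Canonicalize("batch") // non-service/system types default to Attempts: 15, Interval: 7 * 24h
  fmt.Println(*tg.RestartPolicy.Attempts, *tg.RestartPolicy.Interval)
}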
@@ -45,9 +49,9 @@ func testJob() *Job {
 
 func testPeriodicJob() *Job {
   job := testJob().AddPeriodicConfig(&PeriodicConfig{
-    Enabled:  true,
-    Spec:     "*/30 * * * *",
-    SpecType: "cron",
+    Enabled:  helper.BoolToPtr(true),
+    Spec:     helper.StringToPtr("*/30 * * * *"),
+    SpecType: helper.StringToPtr("cron"),
   })
   return job
 }
diff --git a/command/agent/config.go b/command/agent/config.go
index dc90c740f..9efac4b4d 100644
--- a/command/agent/config.go
+++ b/command/agent/config.go
@@ -809,7 +809,7 @@ func normalizeAdvertise(addr string, bind string, defport int, dev bool) (string
 func isMissingPort(err error) bool {
   // matches error const in net/ipsock.go
   const missingPort = "missing port in address"
-  return err != nil && strings.HasPrefix(err.Error(), missingPort)
+  return err != nil && strings.Contains(err.Error(), missingPort)
 }
 
 // Merge is used to merge two server configs together
diff --git a/command/agent/http.go b/command/agent/http.go
index a96c647d6..cdeb05741 100644
--- a/command/agent/http.go
+++ b/command/agent/http.go
@@ -170,6 +170,8 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
   s.mux.HandleFunc("/v1/agent/servers", s.wrap(s.AgentServersRequest))
   s.mux.HandleFunc("/v1/agent/keyring/", s.wrap(s.KeyringOperationRequest))
 
+  s.mux.HandleFunc("/v1/validate/job", s.wrap(s.ValidateJobRequest))
+
   s.mux.HandleFunc("/v1/regions", s.wrap(s.RegionListRequest))
 
   s.mux.HandleFunc("/v1/status/leader", s.wrap(s.StatusLeaderRequest))
diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go
index 8035ca71f..dc295c74c 100644
--- a/command/agent/job_endpoint.go
+++ b/command/agent/job_endpoint.go
@@ -6,6 +6,8 @@ import (
   "strings"
 
   "github.com/golang/snappy"
+  multierror "github.com/hashicorp/go-multierror"
+  "github.com/hashicorp/nomad/api"
   "github.com/hashicorp/nomad/nomad/structs"
 )
 
@@ -91,26 +93,79 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
     return nil, CodedError(405, ErrInvalidMethod)
   }
 
-  var args structs.JobPlanRequest
+  var args api.JobPlanRequest
   if err := decodeBody(req, &args); err != nil {
     return nil, CodedError(400, err.Error())
   }
   if args.Job == nil {
     return nil, CodedError(400, "Job must be specified")
   }
-  if jobName != "" && args.Job.ID != jobName {
+  if args.Job.ID == nil {
+    return nil, CodedError(400, "Job must have a valid ID")
+  }
+  if jobName != "" && *args.Job.ID != jobName {
     return nil, CodedError(400, "Job ID does not match")
   }
   s.parseRegion(req, &args.Region)
 
+  sJob := apiJobToStructJob(args.Job)
+  planReq := structs.JobPlanRequest{
+    Job:  sJob,
+    Diff: args.Diff,
+    WriteRequest: structs.WriteRequest{
+      Region: args.WriteRequest.Region,
+    },
+  }
   var out structs.JobPlanResponse
-  if err := s.agent.RPC("Job.Plan", &args, &out); err != nil {
+  if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil {
     return nil, err
   }
   setIndex(resp, out.Index)
   return out, nil
 }
 
+func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
+  // Ensure request method is POST or PUT
+  if !(req.Method == "POST" || req.Method == "PUT") {
+    return nil, CodedError(405, ErrInvalidMethod)
+  }
+
+  var validateRequest api.JobValidateRequest
+  if err := decodeBody(req, &validateRequest); err != nil {
+    return nil, CodedError(400, err.Error())
+  }
+  if validateRequest.Job == nil {
+    return nil, CodedError(400, "Job must be specified")
+  }
+
+  job := apiJobToStructJob(validateRequest.Job)
+  args := structs.JobValidateRequest{
+    Job: job,
+    WriteRequest: structs.WriteRequest{
+      Region: validateRequest.Region,
+    },
+  }
+  s.parseRegion(req, &args.Region)
+
+  var out structs.JobValidateResponse
+  if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
+
+    // Fall back to local validation
+    args.Job.Canonicalize()
+    if vErr := args.Job.Validate(); vErr != nil {
+      if merr, ok := vErr.(*multierror.Error); ok {
+        for _, e := range merr.Errors {
+          out.ValidationErrors = append(out.ValidationErrors, e.Error())
+        }
+      } else {
+        out.ValidationErrors = append(out.ValidationErrors, vErr.Error())
+      }
+    }
+  }
+
+  return out, nil
+}
+
 func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
   jobName string) (interface{}, error) {
   if req.Method != "PUT" && req.Method != "POST" {
@@ -230,20 +285,34 @@ func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request,
 
 func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
   jobName string) (interface{}, error) {
-  var args structs.JobRegisterRequest
+  var args api.JobRegisterRequest
   if err := decodeBody(req, &args); err != nil {
     return nil, CodedError(400, err.Error())
   }
   if args.Job == nil {
     return nil, CodedError(400, "Job must be specified")
   }
-  if jobName != "" && args.Job.ID != jobName {
-    return nil, CodedError(400, "Job ID does not match")
+
+  if args.Job.ID == nil {
+    return nil, CodedError(400, "Job ID hasn't been provided")
+  }
+  if jobName != "" && *args.Job.ID != jobName {
+    return nil, CodedError(400, "Job ID does not match name")
   }
   s.parseRegion(req, &args.Region)
 
+  sJob := apiJobToStructJob(args.Job)
+
+  regReq := structs.JobRegisterRequest{
+    Job:            sJob,
+    EnforceIndex:   args.EnforceIndex,
+    JobModifyIndex: args.JobModifyIndex,
+    WriteRequest: structs.WriteRequest{
+      Region: args.WriteRequest.Region,
+    },
+  }
   var out structs.JobRegisterResponse
-  if err := s.agent.RPC("Job.Register", &args, &out); err != nil {
+  if err := s.agent.RPC("Job.Register", &regReq, &out); err != nil {
     return nil, err
   }
   setIndex(resp, out.Index)
@@ -310,3 +379,204 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ
   setIndex(resp, out.Index)
   return out, nil
 }
+
+func apiJobToStructJob(job *api.Job) *structs.Job {
+  job.Canonicalize()
+
+  j := &structs.Job{
+    Region:            *job.Region,
+    ID:                *job.ID,
+    ParentID:          *job.ParentID,
+    Name:              *job.Name,
+    Type:              *job.Type,
+    Priority:          *job.Priority,
+    AllAtOnce:         *job.AllAtOnce,
+    Datacenters:       job.Datacenters,
+    Payload:           job.Payload,
+    Meta:              job.Meta,
+    VaultToken:        *job.VaultToken,
+    Status:            *job.Status,
+    StatusDescription: *job.StatusDescription,
+    CreateIndex:       *job.CreateIndex,
+    ModifyIndex:       *job.ModifyIndex,
+    JobModifyIndex:    *job.JobModifyIndex,
+  }
+
+  j.Constraints = make([]*structs.Constraint, len(job.Constraints))
+  for i, c := range job.Constraints {
+    con := &structs.Constraint{}
+    apiConstraintToStructs(c, con)
+    j.Constraints[i] = con
+  }
+  if job.Update != nil {
+    j.Update = structs.UpdateStrategy{
+      Stagger:     job.Update.Stagger,
+      MaxParallel: job.Update.MaxParallel,
+    }
+  }
+  if job.Periodic != nil {
+    j.Periodic = &structs.PeriodicConfig{
+      Enabled:         *job.Periodic.Enabled,
+      SpecType:        *job.Periodic.SpecType,
+      ProhibitOverlap: *job.Periodic.ProhibitOverlap,
+    }
+    if job.Periodic.Spec != nil {
+      j.Periodic.Spec = *job.Periodic.Spec
+    }
+  }
+  if job.ParameterizedJob != nil {
+    j.ParameterizedJob = &structs.ParameterizedJobConfig{
+      Payload:      job.ParameterizedJob.Payload,
+      MetaRequired: job.ParameterizedJob.MetaRequired,
+      MetaOptional: job.ParameterizedJob.MetaOptional,
+    }
+  }
+
+  j.TaskGroups = make([]*structs.TaskGroup, len(job.TaskGroups))
+  for i, taskGroup := range job.TaskGroups {
+    tg := &structs.TaskGroup{}
+    apiTgToStructsTG(taskGroup, tg)
+    j.TaskGroups[i] = tg
+  }
+
+  return j
+}
+
+func apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
+  tg.Name = *taskGroup.Name
+  tg.Count = *taskGroup.Count
+  tg.Constraints = make([]*structs.Constraint, len(taskGroup.Constraints))
+  for k, constraint := range taskGroup.Constraints {
+    c := &structs.Constraint{}
+    apiConstraintToStructs(constraint, c)
+    tg.Constraints[k] = c
+  }
+  tg.RestartPolicy = &structs.RestartPolicy{
+    Attempts: *taskGroup.RestartPolicy.Attempts,
+    Interval: *taskGroup.RestartPolicy.Interval,
+    Delay:    *taskGroup.RestartPolicy.Delay,
+    Mode:     *taskGroup.RestartPolicy.Mode,
+  }
+  tg.EphemeralDisk = &structs.EphemeralDisk{
+    Sticky:  *taskGroup.EphemeralDisk.Sticky,
+    SizeMB:  *taskGroup.EphemeralDisk.SizeMB,
+    Migrate: *taskGroup.EphemeralDisk.Migrate,
+  }
+  tg.Meta = taskGroup.Meta
+  tg.Tasks = make([]*structs.Task, len(taskGroup.Tasks))
+  for l, task := range taskGroup.Tasks {
+    t := &structs.Task{}
+    apiTaskToStructsTask(task, t)
+    tg.Tasks[l] = t
+  }
+}
+
+func apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
+  structsTask.Name = apiTask.Name
+  structsTask.Driver = apiTask.Driver
+  structsTask.User = apiTask.User
+  structsTask.Config = apiTask.Config
+  structsTask.Constraints = make([]*structs.Constraint, len(apiTask.Constraints))
+  for i, constraint := range apiTask.Constraints {
+    c := &structs.Constraint{}
+    apiConstraintToStructs(constraint, c)
+    structsTask.Constraints[i] = c
+  }
+  structsTask.Env = apiTask.Env
+  structsTask.Services = make([]*structs.Service, len(apiTask.Services))
+  for i, service := range apiTask.Services {
+    structsTask.Services[i] = &structs.Service{
+      Name:      service.Name,
+      PortLabel: service.PortLabel,
+      Tags:      service.Tags,
+    }
+    structsTask.Services[i].Checks = make([]*structs.ServiceCheck, len(service.Checks))
+    for j, check := range service.Checks {
+      structsTask.Services[i].Checks[j] = &structs.ServiceCheck{
+        Name:          check.Name,
+        Type:          check.Type,
+        Command:       check.Command,
+        Args:          check.Args,
+        Path:          check.Path,
+        Protocol:      check.Protocol,
+        PortLabel:     check.PortLabel,
+        Interval:      check.Interval,
+        Timeout:       check.Timeout,
+        InitialStatus: check.InitialStatus,
+      }
+    }
+  }
+  structsTask.Resources = &structs.Resources{
+    CPU:      *apiTask.Resources.CPU,
+    MemoryMB: *apiTask.Resources.MemoryMB,
+    IOPS:     *apiTask.Resources.IOPS,
+  }
+  structsTask.Resources.Networks = make([]*structs.NetworkResource, len(apiTask.Resources.Networks))
+  for i, nw := range apiTask.Resources.Networks {
+    structsTask.Resources.Networks[i] = &structs.NetworkResource{
+      CIDR:  nw.CIDR,
+      IP:    nw.IP,
+      MBits: *nw.MBits,
+    }
+    structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(nw.DynamicPorts))
+    structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(nw.ReservedPorts))
+    for j, dp := range nw.DynamicPorts {
+      structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{
+        Label: dp.Label,
+        Value: dp.Value,
+      }
+    }
+    for j, rp := range nw.ReservedPorts {
+      structsTask.Resources.Networks[i].ReservedPorts[j] = structs.Port{
+        Label: rp.Label,
+        Value: rp.Value,
+      }
+    }
+  }
+  structsTask.Meta = apiTask.Meta
+  structsTask.KillTimeout = *apiTask.KillTimeout
+  structsTask.LogConfig = &structs.LogConfig{
+    MaxFiles:      *apiTask.LogConfig.MaxFiles,
+    MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
+  }
+  structsTask.Artifacts = make([]*structs.TaskArtifact, len(apiTask.Artifacts))
+  for k, ta := range apiTask.Artifacts {
+    structsTask.Artifacts[k] = &structs.TaskArtifact{
+      GetterSource:  *ta.GetterSource,
+      GetterOptions: ta.GetterOptions,
+      RelativeDest:  *ta.RelativeDest,
+    }
+  }
+  if apiTask.Vault != nil {
+    structsTask.Vault = &structs.Vault{
+      Policies:     apiTask.Vault.Policies,
+      Env:          *apiTask.Vault.Env,
+      ChangeMode:   *apiTask.Vault.ChangeMode,
+      ChangeSignal: *apiTask.Vault.ChangeSignal,
+    }
+  }
+  structsTask.Templates = make([]*structs.Template, len(apiTask.Templates))
+  for i, template := range apiTask.Templates {
+    structsTask.Templates[i] = &structs.Template{
+      SourcePath:   *template.SourcePath,
+      DestPath:     *template.DestPath,
+      EmbeddedTmpl: *template.EmbeddedTmpl,
+      ChangeMode:   *template.ChangeMode,
+      ChangeSignal: *template.ChangeSignal,
+      Splay:        *template.Splay,
+      Perms:        *template.Perms,
+    }
+  }
+  if apiTask.DispatchPayload != nil {
+    structsTask.DispatchPayload = &structs.DispatchPayloadConfig{
+      File: apiTask.DispatchPayload.File,
+    }
+  }
+}
+
+func apiConstraintToStructs(c1 *api.Constraint, c2 *structs.Constraint) {
+  c2.LTarget = c1.LTarget
+  c2.RTarget = c1.RTarget
+  c2.Operand = c1.Operand
+}
diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go
index 4f3542a81..0941988ce 100644
--- a/command/agent/job_endpoint_test.go
+++ b/command/agent/job_endpoint_test.go
@@ -5,8 +5,11 @@ import (
   "net/http/httptest"
   "reflect"
   "testing"
+  "time"
 
   "github.com/golang/snappy"
+  "github.com/hashicorp/nomad/api"
+  "github.com/hashicorp/nomad/helper"
   "github.com/hashicorp/nomad/nomad/mock"
   "github.com/hashicorp/nomad/nomad/structs"
 )
@@ -15,10 +18,10 @@ func TestHTTP_JobsList(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     for i := 0; i < 3; i++ {
       // Create the job
-      job := mock.Job()
-      args := structs.JobRegisterRequest{
+      job := api.MockJob()
+      args := api.JobRegisterRequest{
         Job:          job,
-        WriteRequest: structs.WriteRequest{Region: "global"},
+        WriteRequest: api.WriteRequest{Region: "global"},
       }
       var resp structs.JobRegisterResponse
       if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -67,12 +70,12 @@ func TestHTTP_PrefixJobsList(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     for i := 0; i < 3; i++ {
       // Create the job
-      job := mock.Job()
-      job.ID = ids[i]
-      job.TaskGroups[0].Count = 1
-      args := structs.JobRegisterRequest{
+      job := api.MockJob()
+      job.ID = &ids[i]
+      *job.TaskGroups[0].Count = 1
+      args := api.JobRegisterRequest{
         Job:          job,
-        WriteRequest: structs.WriteRequest{Region: "global"},
+        WriteRequest: api.WriteRequest{Region: "global"},
       }
       var resp structs.JobRegisterResponse
       if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -115,10 +118,10 @@ func TestHTTP_JobsRegister(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     buf := encodeReq(args)
@@ -148,7 +151,7 @@
     // Check the job is registered
     getReq := structs.JobSpecificRequest{
-      JobID:        job.ID,
+      JobID:        *job.ID,
       QueryOptions: structs.QueryOptions{Region: "global"},
     }
     var getResp structs.SingleJobResponse
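In-package usage sketch for the conversion helper added above (apiJobToStructJob is unexported, so this only compiles inside command/agent):

// in package agent (sketch)
apiJob := api.MockJob()
structsJob := apiJobToStructJob(apiJob) // canonicalizes the api.Job, then copies pointer fields by value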
@@ -162,13 +165,70 @@
   })
 }
 
+func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
+  httpTest(t, nil, func(s *TestServer) {
+    // Create the job
+    job := api.MockJob()
+
+    // Do not set its priority
+    job.Priority = nil
+
+    args := api.JobRegisterRequest{
+      Job:          job,
+      WriteRequest: api.WriteRequest{Region: "global"},
+    }
+    buf := encodeReq(args)
+
+    // Make the HTTP request
+    req, err := http.NewRequest("PUT", "/v1/jobs", buf)
+    if err != nil {
+      t.Fatalf("err: %v", err)
+    }
+    respW := httptest.NewRecorder()
+
+    // Make the request
+    obj, err := s.Server.JobsRequest(respW, req)
+    if err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    // Check the response
+    dereg := obj.(structs.JobRegisterResponse)
+    if dereg.EvalID == "" {
+      t.Fatalf("bad: %v", dereg)
+    }
+
+    // Check for the index
+    if respW.HeaderMap.Get("X-Nomad-Index") == "" {
+      t.Fatalf("missing index")
+    }
+
+    // Check the job is registered
+    getReq := structs.JobSpecificRequest{
+      JobID:        *job.ID,
+      QueryOptions: structs.QueryOptions{Region: "global"},
+    }
+    var getResp structs.SingleJobResponse
+    if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
+      t.Fatalf("err: %v", err)
+    }
+
+    if getResp.Job == nil {
+      t.Fatalf("job does not exist")
+    }
+    if getResp.Job.Priority != 50 {
+      t.Fatalf("job didn't get defaulted")
+    }
+  })
+}
+
 func TestHTTP_JobQuery(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -176,7 +236,7 @@
     }
 
     // Make the HTTP request
-    req, err := http.NewRequest("GET", "/v1/job/"+job.ID, nil)
+    req, err := http.NewRequest("GET", "/v1/job/"+*job.ID, nil)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -201,7 +261,7 @@
     // Check the job
     j := obj.(*structs.Job)
-    if j.ID != job.ID {
+    if j.ID != *job.ID {
       t.Fatalf("bad: %#v", j)
     }
   })
 }
@@ -263,15 +323,15 @@ func TestHTTP_JobQuery_Payload(t *testing.T) {
 
 func TestHTTP_JobUpdate(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     buf := encodeReq(args)
 
     // Make the HTTP request
-    req, err := http.NewRequest("PUT", "/v1/job/"+job.ID, buf)
+    req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -296,7 +356,7 @@
     // Check the job is registered
     getReq := structs.JobSpecificRequest{
-      JobID:        job.ID,
+      JobID:        *job.ID,
       QueryOptions: structs.QueryOptions{Region: "global"},
     }
     var getResp structs.SingleJobResponse
@@ -313,10 +373,10 @@ func TestHTTP_JobDelete(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -324,7 +384,7 @@
     }
 
     // Make the HTTP request
-    req, err := http.NewRequest("DELETE", "/v1/job/"+job.ID, nil)
+    req, err := http.NewRequest("DELETE", "/v1/job/"+*job.ID, nil)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -349,7 +409,7 @@
     // Check the job is gone
     getReq := structs.JobSpecificRequest{
-      JobID:        job.ID,
+      JobID:        *job.ID,
       QueryOptions: structs.QueryOptions{Region: "global"},
     }
     var getResp structs.SingleJobResponse
@@ -365,10 +425,10 @@ func TestHTTP_JobForceEvaluate(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -376,7 +436,7 @@
     }
 
     // Make the HTTP request
-    req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/evaluate", nil)
+    req, err := http.NewRequest("POST", "/v1/job/"+*job.ID+"/evaluate", nil)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -404,10 +464,10 @@ func TestHTTP_JobEvaluations(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the job
-    job := mock.Job()
-    args := structs.JobRegisterRequest{
+    job := api.MockJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -415,7 +475,7 @@
     }
 
     // Make the HTTP request
-    req, err := http.NewRequest("GET", "/v1/job/"+job.ID+"/evaluations", nil)
+    req, err := http.NewRequest("GET", "/v1/job/"+*job.ID+"/evaluations", nil)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -503,10 +563,10 @@ func TestHTTP_JobAllocations(t *testing.T) {
 
 func TestHTTP_PeriodicForce(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create and register a periodic job.
-    job := mock.PeriodicJob()
-    args := structs.JobRegisterRequest{
+    job := api.MockPeriodicJob()
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -514,7 +574,7 @@
     }
 
     // Make the HTTP request
-    req, err := http.NewRequest("POST", "/v1/job/"+job.ID+"/periodic/force", nil)
+    req, err := http.NewRequest("POST", "/v1/job/"+*job.ID+"/periodic/force", nil)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -578,13 +638,13 @@ func TestHTTP_JobPlan(t *testing.T) {
 
 func TestHTTP_JobDispatch(t *testing.T) {
   httpTest(t, nil, func(s *TestServer) {
     // Create the parameterized job
-    job := mock.Job()
-    job.Type = structs.JobTypeBatch
-    job.ParameterizedJob = &structs.ParameterizedJobConfig{}
+    job := api.MockJob()
+    job.Type = helper.StringToPtr("batch")
+    job.ParameterizedJob = &api.ParameterizedJobConfig{}
 
-    args := structs.JobRegisterRequest{
+    args := api.JobRegisterRequest{
       Job:          job,
-      WriteRequest: structs.WriteRequest{Region: "global"},
+      WriteRequest: api.WriteRequest{Region: "global"},
     }
     var resp structs.JobRegisterResponse
     if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@@ -599,7 +659,7 @@
     buf := encodeReq(args2)
 
     // Make the HTTP request
-    req2, err := http.NewRequest("PUT", "/v1/job/"+job.ID+"/dispatch", buf)
+    req2, err := http.NewRequest("PUT", "/v1/job/"+*job.ID+"/dispatch", buf)
     if err != nil {
       t.Fatalf("err: %v", err)
     }
@@ -622,3 +682,355 @@
   }
   })
 }
+
+func TestJobs_ApiJobToStructsJob(t *testing.T) {
+  apiJob := &api.Job{
+    Region:      helper.StringToPtr("global"),
+    ID:          helper.StringToPtr("foo"),
+    ParentID:    helper.StringToPtr("lol"),
+    Name:        helper.StringToPtr("name"),
+    Type:        helper.StringToPtr("service"),
+    Priority:    helper.IntToPtr(50),
+    AllAtOnce:   helper.BoolToPtr(true),
+    Datacenters: []string{"dc1", "dc2"},
+    Constraints: []*api.Constraint{
+      {
+        LTarget: "a",
+        RTarget: "b",
+        Operand: "c",
+      },
+    },
+    Update: &api.UpdateStrategy{
+      Stagger:     1 * time.Second,
+      MaxParallel: 5,
+    },
+    Periodic: &api.PeriodicConfig{
+      Enabled:         helper.BoolToPtr(true),
+      Spec:            helper.StringToPtr("spec"),
+      SpecType:        helper.StringToPtr("cron"),
+      ProhibitOverlap: helper.BoolToPtr(true),
+    },
+    ParameterizedJob: &api.ParameterizedJobConfig{
+      Payload:      "payload",
+      MetaRequired: []string{"a", "b"},
+      MetaOptional: []string{"c", "d"},
+    },
+    Payload: []byte("payload"),
+    Meta: map[string]string{
+      "foo": "bar",
+    },
+    TaskGroups: []*api.TaskGroup{
+      {
+        Name:  helper.StringToPtr("group1"),
+        Count: helper.IntToPtr(5),
+        Constraints: []*api.Constraint{
+          {
+            LTarget: "x",
+            RTarget: "y",
+            Operand: "z",
+          },
+        },
+        RestartPolicy: &api.RestartPolicy{
+          Interval: helper.TimeToPtr(1 * time.Second),
+          Attempts: helper.IntToPtr(5),
+          Delay:    helper.TimeToPtr(10 * time.Second),
+          Mode:     helper.StringToPtr("delay"),
+        },
+        EphemeralDisk: &api.EphemeralDisk{
+          SizeMB:  helper.IntToPtr(100),
+          Sticky:  helper.BoolToPtr(true),
+          Migrate: helper.BoolToPtr(true),
+        },
+        Meta: map[string]string{
+          "key": "value",
+        },
+        Tasks: []*api.Task{
+          {
+            Name:   "task1",
+            Driver: "docker",
+            User:   "mary",
+            Config: map[string]interface{}{
+              "lol": "code",
+            },
+            Env: map[string]string{
+              "hello": "world",
+            },
+            Constraints: []*api.Constraint{
+              {
+                LTarget: "x",
+                RTarget: "y",
+                Operand: "z",
+              },
+            },
+
+            Services: []api.Service{
+              {
+                Id:        "id",
+                Name:      "serviceA",
+                Tags:      []string{"1", "2"},
+                PortLabel: "foo",
+                Checks: []api.ServiceCheck{
+                  {
+                    Id:            "hello",
+                    Name:          "bar",
+                    Type:          "http",
+                    Command:       "foo",
+                    Args:          []string{"a", "b"},
+                    Path:          "/check",
+                    Protocol:      "http",
+                    PortLabel:     "foo",
+                    Interval:      4 * time.Second,
+                    Timeout:       2 * time.Second,
+                    InitialStatus: "ok",
+                  },
+                },
+              },
+            },
+            Resources: &api.Resources{
+              CPU:      helper.IntToPtr(100),
+              MemoryMB: helper.IntToPtr(10),
+              Networks: []*api.NetworkResource{
+                {
+                  IP:    "10.10.11.1",
+                  MBits: helper.IntToPtr(10),
+                  ReservedPorts: []api.Port{
+                    {
+                      Label: "http",
+                      Value: 80,
+                    },
+                  },
+                  DynamicPorts: []api.Port{
+                    {
+                      Label: "ssh",
+                      Value: 2000,
+                    },
+                  },
+                },
+              },
+            },
+            Meta: map[string]string{
+              "lol": "code",
+            },
+            KillTimeout: helper.TimeToPtr(10 * time.Second),
+            LogConfig: &api.LogConfig{
+              MaxFiles:      helper.IntToPtr(10),
+              MaxFileSizeMB: helper.IntToPtr(100),
+            },
+            Artifacts: []*api.TaskArtifact{
+              {
+                GetterSource: helper.StringToPtr("source"),
+                GetterOptions: map[string]string{
+                  "a": "b",
+                },
+                RelativeDest: helper.StringToPtr("dest"),
+              },
+            },
+            Vault: &api.Vault{
+              Policies:     []string{"a", "b", "c"},
+              Env:          helper.BoolToPtr(true),
+              ChangeMode:   helper.StringToPtr("c"),
+              ChangeSignal: helper.StringToPtr("sighup"),
+            },
+            Templates: []*api.Template{
+              {
+                SourcePath:   helper.StringToPtr("source"),
+                DestPath:     helper.StringToPtr("dest"),
+                EmbeddedTmpl: helper.StringToPtr("embedded"),
+                ChangeMode:   helper.StringToPtr("change"),
+                ChangeSignal: helper.StringToPtr("signal"),
+                Splay:        helper.TimeToPtr(1 * time.Minute),
+                Perms:        helper.StringToPtr("666"),
+              },
+            },
+            DispatchPayload: &api.DispatchPayloadConfig{
+              File: "fileA",
+            },
+          },
+        },
+      },
+    },
+    VaultToken:        helper.StringToPtr("token"),
+    Status:            helper.StringToPtr("status"),
+    StatusDescription: helper.StringToPtr("status_desc"),
+    CreateIndex:       helper.Uint64ToPtr(1),
+    ModifyIndex:       helper.Uint64ToPtr(3),
+    JobModifyIndex:    helper.Uint64ToPtr(5),
+  }
+
+  expected := &structs.Job{
+    Region:      "global",
+    ID:          "foo",
+    ParentID:    "lol",
+    Name:        "name",
+    Type:        "service",
+    Priority:    50,
+    AllAtOnce:   true,
+    Datacenters: []string{"dc1", "dc2"},
+    Constraints: []*structs.Constraint{
+      {
+        LTarget: "a",
+        RTarget: "b",
+        Operand: "c",
+      },
+    },
+    Update: structs.UpdateStrategy{
+      Stagger:     1 * time.Second,
+      MaxParallel: 5,
+    },
+    Periodic: &structs.PeriodicConfig{
+      Enabled:         true,
+      Spec:            "spec",
+      SpecType:        "cron",
+      ProhibitOverlap: true,
+    },
+    ParameterizedJob: &structs.ParameterizedJobConfig{
+      Payload:      "payload",
+      MetaRequired: []string{"a", "b"},
+      MetaOptional: []string{"c", "d"},
+    },
+    Payload: []byte("payload"),
+    Meta: map[string]string{
+      "foo": "bar",
+    },
+    TaskGroups: []*structs.TaskGroup{
+      {
+        Name:  "group1",
+        Count: 5,
+        Constraints: []*structs.Constraint{
+          {
+            LTarget: "x",
+            RTarget: "y",
+            Operand: "z",
+          },
+        },
+        RestartPolicy: &structs.RestartPolicy{
+          Interval: 1 * time.Second,
+          Attempts: 5,
+          Delay:    10 * time.Second,
+          Mode:     "delay",
+        },
+        EphemeralDisk: &structs.EphemeralDisk{
+          SizeMB:  100,
+          Sticky:  true,
+          Migrate: true,
+        },
+        Meta: map[string]string{
+          "key": "value",
+        },
+        Tasks: []*structs.Task{
+          {
+            Name:   "task1",
+            Driver: "docker",
+            User:   "mary",
+            Config: map[string]interface{}{
+              "lol": "code",
+            },
+            Constraints: []*structs.Constraint{
+              {
+                LTarget: "x",
+                RTarget: "y",
+                Operand: "z",
+              },
+            },
+            Env: map[string]string{
+              "hello": "world",
+            },
+            Services: []*structs.Service{
+              &structs.Service{
+                Name:      "serviceA",
+                Tags:      []string{"1", "2"},
+                PortLabel: "foo",
+                Checks: []*structs.ServiceCheck{
+                  &structs.ServiceCheck{
+                    Name:          "bar",
+                    Type:          "http",
+                    Command:       "foo",
+                    Args:          []string{"a", "b"},
+                    Path:          "/check",
+                    Protocol:      "http",
+                    PortLabel:     "foo",
+                    Interval:      4 * time.Second,
+                    Timeout:       2 * time.Second,
+                    InitialStatus: "ok",
+                  },
+                },
+              },
+            },
+            Resources: &structs.Resources{
+              CPU:      100,
+              MemoryMB: 10,
+              Networks: []*structs.NetworkResource{
+                {
+                  IP:    "10.10.11.1",
+                  MBits: 10,
+                  ReservedPorts: []structs.Port{
+                    {
+                      Label: "http",
+                      Value: 80,
+                    },
+                  },
+                  DynamicPorts: []structs.Port{
+                    {
+                      Label: "ssh",
+                      Value: 2000,
+                    },
+                  },
+                },
+              },
+            },
+            Meta: map[string]string{
+              "lol": "code",
+            },
+            KillTimeout: 10 * time.Second,
+            LogConfig: &structs.LogConfig{
+              MaxFiles:      10,
+              MaxFileSizeMB: 100,
+            },
+            Artifacts: []*structs.TaskArtifact{
+              {
+                GetterSource: "source",
+                GetterOptions: map[string]string{
+                  "a": "b",
+                },
+                RelativeDest: "dest",
+              },
+            },
+            Vault: &structs.Vault{
+              Policies:     []string{"a", "b", "c"},
+              Env:          true,
+              ChangeMode:   "c",
+              ChangeSignal: "sighup",
+            },
+            Templates: []*structs.Template{
+              {
+                SourcePath:   "source",
+                DestPath:     "dest",
+                EmbeddedTmpl: "embedded",
+                ChangeMode:   "change",
+                ChangeSignal: "SIGNAL",
+                Splay:        1 * time.Minute,
+                Perms:        "666",
+              },
+            },
+            DispatchPayload: &structs.DispatchPayloadConfig{
+              File: "fileA",
+            },
+          },
+        },
+      },
+    },
+
+    VaultToken:        "token",
+    Status:            "status",
+    StatusDescription: "status_desc",
+    CreateIndex:       1,
+    ModifyIndex:       3,
+    JobModifyIndex:    5,
+  }
+
+  structsJob := apiJobToStructJob(apiJob)
+
+  if !reflect.DeepEqual(expected, structsJob) {
+    t.Fatalf("bad %#v", structsJob)
+  }
+}
diff --git a/command/alloc_status.go b/command/alloc_status.go
index d5ea53d17..27b7cd0c4 100644
--- a/command/alloc_status.go
+++ b/command/alloc_status.go
@@ -410,8 +410,8 @@ func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task str
   }
 
   // Display the rolled up stats. If possible prefer the live statistics
-  cpuUsage := strconv.Itoa(resource.CPU)
-  memUsage := humanize.IBytes(uint64(resource.MemoryMB * bytesPerMegabyte))
+  cpuUsage := strconv.Itoa(*resource.CPU)
+  memUsage := humanize.IBytes(uint64(*resource.MemoryMB * bytesPerMegabyte))
   if stats != nil {
     if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
       if cs := ru.ResourceUsage.CpuStats; cs != nil {
@@ -425,8 +425,8 @@
   resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v|%v",
     cpuUsage,
     memUsage,
-    humanize.IBytes(uint64(resource.DiskMB*bytesPerMegabyte)),
-    resource.IOPS,
+    humanize.IBytes(uint64(*resource.DiskMB*bytesPerMegabyte)),
+    *resource.IOPS,
     firstAddr))
   for i := 1; i < len(addr); i++ {
     resourcesOutput = append(resourcesOutput, fmt.Sprintf("||||%v", addr[i]))
diff --git a/command/helpers.go b/command/helpers.go
index e27ed09b8..1f5b6df5f 100644
--- a/command/helpers.go
+++ b/command/helpers.go
@@ -12,7 +12,6 @@ import (
   gg "github.com/hashicorp/go-getter"
   "github.com/hashicorp/nomad/api"
   "github.com/hashicorp/nomad/jobspec"
-  "github.com/hashicorp/nomad/nomad/structs"
   "github.com/ryanuber/columnize"
 )
-func (j *JobGetter) StructJob(jpath string) (*structs.Job, error) { +func (j *JobGetter) ApiJob(jpath string) (*api.Job, error) { var jobfile io.Reader switch jpath { case "-": diff --git a/command/helpers_test.go b/command/helpers_test.go index 8d67f123f..a430c91a7 100644 --- a/command/helpers_test.go +++ b/command/helpers_test.go @@ -11,6 +11,9 @@ import ( "testing" "time" + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" + "github.com/hashicorp/nomad/helper/flatmap" "github.com/mitchellh/cli" ) @@ -208,8 +211,46 @@ const ( }` ) -// Test StructJob with local jobfile -func TestStructJobWithLocal(t *testing.T) { +var ( + expectedApiJob = &api.Job{ + ID: helper.StringToPtr("job1"), + Region: helper.StringToPtr("global"), + Priority: helper.IntToPtr(50), + Name: helper.StringToPtr("job1"), + Type: helper.StringToPtr("service"), + Datacenters: []string{"dc1"}, + TaskGroups: []*api.TaskGroup{ + { + Name: helper.StringToPtr("group1"), + Count: helper.IntToPtr(1), + RestartPolicy: &api.RestartPolicy{ + Attempts: helper.IntToPtr(10), + Interval: helper.TimeToPtr(15 * time.Second), + Mode: helper.StringToPtr("delay"), + }, + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + }, + + Tasks: []*api.Task{ + { + Driver: "exec", + Name: "task1", + Resources: &api.Resources{ + CPU: helper.IntToPtr(100), + MemoryMB: helper.IntToPtr(10), + IOPS: helper.IntToPtr(0), + }, + LogConfig: api.DefaultLogConfig(), + }, + }, + }, + }, + } +) + +// Test APIJob with local jobfile +func TestJobGetter_LocalFile(t *testing.T) { fh, err := ioutil.TempFile("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -221,19 +262,20 @@ func TestStructJobWithLocal(t *testing.T) { } j := &JobGetter{} - sj, err := j.StructJob(fh.Name()) + aj, err := j.ApiJob(fh.Name()) if err != nil { t.Fatalf("err: %s", err) } - err = sj.Validate() - if err != nil { - t.Fatalf("err: %s", err) + if !reflect.DeepEqual(expectedApiJob, aj) { + eflat := flatmap.Flatten(expectedApiJob, nil, false) + aflat := flatmap.Flatten(aj, nil, false) + t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } } // Test StructJob with jobfile from HTTP Server -func TestStructJobWithHTTPServer(t *testing.T) { +func TestJobGetter_HTTPServer(t *testing.T) { http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, job) }) @@ -243,13 +285,13 @@ func TestStructJobWithHTTPServer(t *testing.T) { time.Sleep(100 * time.Millisecond) j := &JobGetter{} - sj, err := j.StructJob("http://127.0.0.1:12345/") + aj, err := j.ApiJob("http://127.0.0.1:12345/") if err != nil { t.Fatalf("err: %s", err) } - - err = sj.Validate() - if err != nil { - t.Fatalf("err: %s", err) + if !reflect.DeepEqual(expectedApiJob, aj) { + eflat := flatmap.Flatten(expectedApiJob, nil, false) + aflat := flatmap.Flatten(aj, nil, false) + t.Fatalf("got:\n%v\nwant:\n%v", aflat, eflat) } } diff --git a/command/logs.go b/command/logs.go index 463f23833..6b3d2bb4e 100644 --- a/command/logs.go +++ b/command/logs.go @@ -169,7 +169,7 @@ func (l *LogsCommand) Run(args []string) int { // Try to determine the tasks name from the allocation var tasks []*api.Task for _, tg := range alloc.Job.TaskGroups { - if tg.Name == alloc.TaskGroup { + if *tg.Name == alloc.TaskGroup { if len(tg.Tasks) == 1 { task = tg.Tasks[0].Name break diff --git a/command/node_status.go b/command/node_status.go index 6dc28aa2d..e83af3512 100644 --- a/command/node_status.go +++ b/command/node_status.go @@ -11,6 +11,7 @@ import ( "github.com/mitchellh/colorstring" 
"github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" ) const ( @@ -487,10 +488,10 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, // Get Resources var cpu, mem, disk, iops int for _, alloc := range runningAllocs { - cpu += alloc.Resources.CPU - mem += alloc.Resources.MemoryMB - disk += alloc.Resources.DiskMB - iops += alloc.Resources.IOPS + cpu += *alloc.Resources.CPU + mem += *alloc.Resources.MemoryMB + disk += *alloc.Resources.DiskMB + iops += *alloc.Resources.IOPS } resources := make([]string, 2) @@ -499,9 +500,9 @@ func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, cpu, total.CPU, humanize.IBytes(uint64(mem*bytesPerMegabyte)), - humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)), + humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte)), humanize.IBytes(uint64(disk*bytesPerMegabyte)), - humanize.IBytes(uint64(total.DiskMB*bytesPerMegabyte)), + humanize.IBytes(uint64(*total.DiskMB*bytesPerMegabyte)), iops, total.IOPS) @@ -518,10 +519,10 @@ func computeNodeTotalResources(node *api.Node) api.Resources { if res == nil { res = &api.Resources{} } - total.CPU = r.CPU - res.CPU - total.MemoryMB = r.MemoryMB - res.MemoryMB - total.DiskMB = r.DiskMB - res.DiskMB - total.IOPS = r.IOPS - res.IOPS + total.CPU = helper.IntToPtr(*r.CPU - *res.CPU) + total.MemoryMB = helper.IntToPtr(*r.MemoryMB - *res.MemoryMB) + total.DiskMB = helper.IntToPtr(*r.DiskMB - *res.DiskMB) + total.IOPS = helper.IntToPtr(*r.IOPS - *res.IOPS) return total } @@ -550,7 +551,7 @@ func getActualResources(client *api.Client, runningAllocs []*api.Allocation, nod math.Floor(cpu), total.CPU, humanize.IBytes(mem), - humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte))) + humanize.IBytes(uint64(*total.MemoryMB*bytesPerMegabyte))) return resources, nil } diff --git a/command/plan.go b/command/plan.go index 06d490530..896e5f77d 100644 --- a/command/plan.go +++ b/command/plan.go @@ -7,7 +7,6 @@ import ( "time" "github.com/hashicorp/nomad/api" - "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/scheduler" "github.com/mitchellh/colorstring" ) @@ -99,28 +98,12 @@ func (c *PlanCommand) Run(args []string) int { path := args[0] // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 255 } - // Initialize any fields that need to be. - job.Canonicalize() - - // Check that the job is valid - if err := job.Validate(); err != nil { - c.Ui.Error(fmt.Sprintf("Error validating job: %s", err)) - return 255 - } - - // Convert it to something we can use - apiJob, err := convertStructJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 255 - } - // Get the HTTP client client, err := c.Meta.Client() if err != nil { @@ -129,12 +112,12 @@ func (c *PlanCommand) Run(args []string) int { } // Force the region to be that of the job. - if r := job.Region; r != "" { - client.SetRegion(r) + if r := job.Region; r != nil { + client.SetRegion(*r) } // Submit the job - resp, _, err := client.Jobs().Plan(apiJob, diff, nil) + resp, _, err := client.Jobs().Plan(job, diff, nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error during plan: %s", err)) return 255 @@ -179,7 +162,7 @@ func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string { } // formatDryRun produces a string explaining the results of the dry run. 
-func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { +func formatDryRun(resp *api.JobPlanResponse, job *api.Job) string { var rolling *api.Evaluation for _, eval := range resp.CreatedEvals { if eval.TriggeredBy == "rolling-update" { @@ -192,7 +175,7 @@ func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { out = "[bold][green]- All tasks successfully allocated.[reset]\n" } else { // Change the output depending on if we are a system job or not - if job.Type == "system" { + if job.Type != nil && *job.Type == "system" { out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n" } else { out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n" @@ -218,9 +201,14 @@ func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string { } if next := resp.NextPeriodicLaunch; !next.IsZero() { - now := time.Now().In(job.Periodic.GetLocation()) - out += fmt.Sprintf("[green]- If submitted now, next periodic launch would be at %s (%s from now).\n", - formatTime(next), formatTimeDifference(now, next, time.Second)) + loc, err := job.Periodic.GetLocation() + if err != nil { + out += fmt.Sprintf("[yellow]- Invalid time zone: %v", err) + } else { + now := time.Now().In(loc) + out += fmt.Sprintf("[green]- If submitted now, next periodic launch would be at %s (%s from now).\n", + formatTime(next), formatTimeDifference(now, next, time.Second)) + } } out = strings.TrimSuffix(out, "\n") diff --git a/command/plan_test.go b/command/plan_test.go index 3bd7511ba..f8aed7e5b 100644 --- a/command/plan_test.go +++ b/command/plan_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -17,6 +19,11 @@ func TestPlanCommand_Fails(t *testing.T) { ui := new(cli.MockUi) cmd := &PlanCommand{Meta: Meta{Ui: ui}} + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + // Fails on misuse if code := cmd.Run([]string{"some", "bad", "args"}); code != 255 { t.Fatalf("expected exit code 1, got: %d", code) @@ -64,7 +71,7 @@ func TestPlanCommand_Fails(t *testing.T) { if code := cmd.Run([]string{fh2.Name()}); code != 255 { t.Fatalf("expect exit 255, got: %d", code) } - if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error validating") { + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error during plan") { t.Fatalf("expect validation error, got: %s", out) } ui.ErrorWriter.Reset() diff --git a/command/run.go b/command/run.go index 68c1df340..a5eeb5808 100644 --- a/command/run.go +++ b/command/run.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -132,21 +133,24 @@ func (c *RunCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 } - // Initialize any fields that need to be. - job.Canonicalize() - - // Check that the job is valid - if err := job.Validate(); err != nil { - c.Ui.Error(fmt.Sprintf("Error validating job: %v", err)) + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) return 1 } + // Force the region to be that of the job. 
+ if r := job.Region; r != nil { + client.SetRegion(*r) + } + // Check if the job is periodic or is a parameterized job periodic := job.IsPeriodic() paramjob := job.IsParameterized() @@ -158,35 +162,24 @@ func (c *RunCommand) Run(args []string) int { } if vaultToken != "" { - job.VaultToken = vaultToken - } - - // Convert it to something we can use - apiJob, err := convertStructJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 1 + job.VaultToken = helper.StringToPtr(vaultToken) } // COMPAT 0.4.1 -> 0.5 Remove in 0.6 - if apiJob.TaskGroups != nil { - OUTSIDE: - for _, tg := range apiJob.TaskGroups { - if tg.Tasks != nil { - for _, task := range tg.Tasks { - if task.Resources != nil { - if task.Resources.DiskMB > 0 { - c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html") - break OUTSIDE - } - } +OUTSIDE: + for _, tg := range job.TaskGroups { + for _, task := range tg.Tasks { + if task.Resources != nil { + if task.Resources.DiskMB != nil { + c.Ui.Error("WARNING: disk attribute is deprecated in the resources block. See https://www.nomadproject.io/docs/job-specification/ephemeral_disk.html") + break OUTSIDE } } } } if output { - req := api.RegisterJobRequest{Job: apiJob} + req := api.RegisterJobRequest{Job: job} buf, err := json.MarshalIndent(req, "", " ") if err != nil { c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) @@ -197,18 +190,6 @@ func (c *RunCommand) Run(args []string) int { return 0 } - // Get the HTTP client - client, err := c.Meta.Client() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) - return 1 - } - - // Force the region to be that of the job. - if r := job.Region; r != "" { - client.SetRegion(r) - } - // Parse the check-index checkIndex, enforce, err := parseCheckIndex(checkIndexStr) if err != nil { @@ -219,9 +200,9 @@ func (c *RunCommand) Run(args []string) int { // Submit the job var evalID string if enforce { - evalID, _, err = client.Jobs().EnforceRegister(apiJob, checkIndex, nil) + evalID, _, err = client.Jobs().EnforceRegister(job, checkIndex, nil) } else { - evalID, _, err = client.Jobs().Register(apiJob, nil) + evalID, _, err = client.Jobs().Register(job, nil) } if err != nil { if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) { @@ -243,10 +224,13 @@ func (c *RunCommand) Run(args []string) int { if detach || periodic || paramjob { c.Ui.Output("Job registration successful") if periodic { - now := time.Now().In(job.Periodic.GetLocation()) - next := job.Periodic.Next(now) - c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)", - formatTime(next), formatTimeDifference(now, next, time.Second))) + loc, err := job.Periodic.GetLocation() + if err == nil { + now := time.Now().In(loc) + next := job.Periodic.Next(now) + c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)", + formatTime(next), formatTimeDifference(now, next, time.Second))) + } } else if !paramjob { c.Ui.Output("Evaluation ID: " + evalID) } diff --git a/command/run_test.go b/command/run_test.go index f8a5212be..a8e5e7a6d 100644 --- a/command/run_test.go +++ b/command/run_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -52,6 +54,11 @@ func TestRunCommand_Fails(t *testing.T) { ui := new(cli.MockUi) cmd := &RunCommand{Meta: Meta{Ui: ui}} + // Create a server + 
s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + // Fails on misuse if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 { t.Fatalf("expected exit code 1, got: %d", code) @@ -99,7 +106,7 @@ func TestRunCommand_Fails(t *testing.T) { if code := cmd.Run([]string{fh2.Name()}); code != 1 { t.Fatalf("expect exit 1, got: %d", code) } - if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error validating") { + if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error submitting job") { t.Fatalf("expect validation error, got: %s", out) } ui.ErrorWriter.Reset() diff --git a/command/status.go b/command/status.go index 5d64bc4e1..a9543feff 100644 --- a/command/status.go +++ b/command/status.go @@ -1,8 +1,6 @@ package command import ( - "bytes" - "encoding/gob" "fmt" "sort" "strings" @@ -133,34 +131,30 @@ func (c *StatusCommand) Run(args []string) int { return 1 } - // Check if it is periodic or a parameterized job - sJob, err := convertApiJob(job) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error converting job: %s", err)) - return 1 - } - sJob.Canonicalize() - periodic := sJob.IsPeriodic() - parameterized := sJob.IsParameterized() + periodic := job.IsPeriodic() + parameterized := job.IsParameterized() // Format the job info basic := []string{ - fmt.Sprintf("ID|%s", job.ID), - fmt.Sprintf("Name|%s", job.Name), - fmt.Sprintf("Type|%s", job.Type), - fmt.Sprintf("Priority|%d", job.Priority), + fmt.Sprintf("ID|%s", *job.ID), + fmt.Sprintf("Name|%s", *job.Name), + fmt.Sprintf("Type|%s", *job.Type), + fmt.Sprintf("Priority|%d", *job.Priority), fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")), - fmt.Sprintf("Status|%s", job.Status), + fmt.Sprintf("Status|%s", *job.Status), fmt.Sprintf("Periodic|%v", periodic), fmt.Sprintf("Parameterized|%v", parameterized), } if periodic { - now := time.Now().In(sJob.Periodic.GetLocation()) - next := sJob.Periodic.Next(now) - basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s", - fmt.Sprintf("%s (%s from now)", - formatTime(next), formatTimeDifference(now, next, time.Second)))) + location, err := job.Periodic.GetLocation() + if err == nil { + now := time.Now().In(location) + next := job.Periodic.Next(now) + basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s", + fmt.Sprintf("%s (%s from now)", + formatTime(next), formatTimeDifference(now, next, time.Second)))) + } } c.Ui.Output(formatKV(basic)) @@ -200,7 +194,7 @@ func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) err } // Generate the prefix that matches launched jobs from the periodic job. - prefix := fmt.Sprintf("%s%s", job.ID, structs.PeriodicLaunchSuffix) + prefix := fmt.Sprintf("%s%s", *job.ID, structs.PeriodicLaunchSuffix) children, _, err := client.Jobs().PrefixList(prefix) if err != nil { return fmt.Errorf("Error querying job: %s", err) @@ -216,7 +210,7 @@ func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) err for _, child := range children { // Ensure that we are only showing jobs whose parent is the requested // job. - if child.ParentID != job.ID { + if child.ParentID != *job.ID { continue } @@ -246,8 +240,8 @@ func (c *StatusCommand) outputParameterizedInfo(client *api.Client, job *api.Job return err } - // Generate the prefix that matches launched jobs from the periodic job. - prefix := fmt.Sprintf("%s%s", job.ID, structs.DispatchLaunchSuffix) + // Generate the prefix that matches launched jobs from the parameterized job. 
+ prefix := fmt.Sprintf("%s%s", *job.ID, structs.DispatchLaunchSuffix) children, _, err := client.Jobs().PrefixList(prefix) if err != nil { return fmt.Errorf("Error querying job: %s", err) @@ -263,7 +257,7 @@ func (c *StatusCommand) outputParameterizedInfo(client *api.Client, job *api.Job for _, child := range children { // Ensure that we are only showing jobs whose parent is the requested // job. - if child.ParentID != job.ID { + if child.ParentID != *job.ID { continue } @@ -283,13 +277,13 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { var evals, allocs []string // Query the allocations - jobAllocs, _, err := client.Jobs().Allocations(job.ID, c.allAllocs, nil) + jobAllocs, _, err := client.Jobs().Allocations(*job.ID, c.allAllocs, nil) if err != nil { return fmt.Errorf("Error querying job allocations: %s", err) } // Query the evaluations - jobEvals, _, err := client.Jobs().Evaluations(job.ID, nil) + jobEvals, _, err := client.Jobs().Evaluations(*job.ID, nil) if err != nil { return fmt.Errorf("Error querying job evaluations: %s", err) } @@ -367,7 +361,7 @@ func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error { // where appropriate func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error { // Query the summary - summary, _, err := client.Jobs().Summary(job.ID, nil) + summary, _, err := client.Jobs().Summary(*job.ID, nil) if err != nil { return fmt.Errorf("Error querying job summary: %s", err) } @@ -376,13 +370,8 @@ func (c *StatusCommand) outputJobSummary(client *api.Client, job *api.Job) error return nil } - sJob, err := convertApiJob(job) - if err != nil { - return fmt.Errorf("Error converting job: %s", err) - } - - periodic := sJob.IsPeriodic() - parameterizedJob := sJob.IsParameterized() + periodic := job.IsPeriodic() + parameterizedJob := job.IsParameterized() // Print the summary if !periodic && !parameterizedJob { @@ -450,22 +439,6 @@ func (c *StatusCommand) outputFailedPlacements(failedEval *api.Evaluation) { } } -// convertApiJob is used to take a *api.Job and convert it to an *struct.Job. -// This function is just a hammer and probably needs to be revisited. -func convertApiJob(in *api.Job) (*structs.Job, error) { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) - var structJob *structs.Job - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(in); err != nil { - return nil, err - } - if err := gob.NewDecoder(buf).Decode(&structJob); err != nil { - return nil, err - } - return structJob, nil -} - // list general information about a list of jobs func createStatusListOutput(jobs []*api.JobListStub) string { out := make([]string, len(jobs)+1) diff --git a/command/stop.go b/command/stop.go index 1f19baae5..aff09c315 100644 --- a/command/stop.go +++ b/command/stop.go @@ -109,7 +109,7 @@ func (c *StopCommand) Run(args []string) int { } // Confirm the stop if the job was a prefix match. - if jobID != job.ID && !autoYes { + if jobID != *job.ID && !autoYes { question := fmt.Sprintf("Are you sure you want to stop job %q? 
[y/N]", job.ID) answer, err := c.Ui.Ask(question) if err != nil { @@ -132,7 +132,7 @@ func (c *StopCommand) Run(args []string) int { } // Invoke the stop - evalID, _, err := client.Jobs().Deregister(job.ID, nil) + evalID, _, err := client.Jobs().Deregister(*job.ID, nil) if err != nil { c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err)) return 1 diff --git a/command/util_test.go b/command/util_test.go index ae6b5ab7f..23b7715a7 100644 --- a/command/util_test.go +++ b/command/util_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/testutil" ) @@ -44,18 +45,18 @@ func testJob(jobID string) *api.Job { SetConfig("run_for", "5s"). SetConfig("exit_code", 0). Require(&api.Resources{ - MemoryMB: 256, - CPU: 100, + MemoryMB: helper.IntToPtr(256), + CPU: helper.IntToPtr(100), }). SetLogConfig(&api.LogConfig{ - MaxFiles: 1, - MaxFileSizeMB: 2, + MaxFiles: helper.IntToPtr(1), + MaxFileSizeMB: helper.IntToPtr(2), }) group := api.NewTaskGroup("group1", 1). AddTask(task). RequireDisk(&api.EphemeralDisk{ - SizeMB: 20, + SizeMB: helper.IntToPtr(20), }) job := api.NewBatchJob(jobID, jobID, "region1", 1). diff --git a/command/validate.go b/command/validate.go index 7efadca0a..fa59ff424 100644 --- a/command/validate.go +++ b/command/validate.go @@ -3,11 +3,14 @@ package command import ( "fmt" "strings" + + "github.com/mitchellh/colorstring" ) type ValidateCommand struct { Meta JobGetter + color *colorstring.Colorize } func (c *ValidateCommand) Help() string { @@ -43,20 +46,41 @@ func (c *ValidateCommand) Run(args []string) int { } // Get Job struct from Jobfile - job, err := c.JobGetter.StructJob(args[0]) + job, err := c.JobGetter.ApiJob(args[0]) if err != nil { c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err)) return 1 } - // Initialize any fields that need to be. - job.Canonicalize() + // Get the HTTP client + client, err := c.Meta.Client() + if err != nil { + c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) + return 255 + } + + // Force the region to be that of the job. + if r := job.Region; r != nil { + client.SetRegion(*r) + } // Check that the job is valid - if err := job.Validate(); err != nil { + jr, _, err := client.Jobs().Validate(job, nil) + if err != nil { c.Ui.Error(fmt.Sprintf("Error validating job: %s", err)) return 1 } + if jr != nil && !jr.DriverConfigValidated { + c.Ui.Output(c.Colorize().Color("[bold][orange]Driver configuration not validated.[reset]")) + } + + if jr != nil && len(jr.ValidationErrors) > 0 { + c.Ui.Output("Job Validation errors:") + for _, err := range jr.ValidationErrors { + c.Ui.Output(err) + } + return 1 + } // Done! 
c.Ui.Output("Job validation successful") diff --git a/command/validate_test.go b/command/validate_test.go index 11be97511..c4f4069ca 100644 --- a/command/validate_test.go +++ b/command/validate_test.go @@ -1,11 +1,13 @@ package command import ( + "fmt" "io/ioutil" "os" "strings" "testing" + "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" ) @@ -17,6 +19,11 @@ func TestValidateCommand(t *testing.T) { ui := new(cli.MockUi) cmd := &ValidateCommand{Meta: Meta{Ui: ui}} + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) + fh, err := ioutil.TempFile("", "nomad") if err != nil { t.Fatalf("err: %s", err) @@ -30,6 +37,9 @@ job "job1" { count = 1 task "task1" { driver = "exec" + config { + command = "/bin/sleep" + } resources = { cpu = 1000 memory = 512 @@ -113,6 +123,10 @@ func TestValidateCommand_From_STDIN(t *testing.T) { Meta: Meta{Ui: ui}, JobGetter: JobGetter{testStdin: stdinR}, } + // Create a server + s := testutil.NewTestServer(t, nil) + defer s.Stop() + os.Setenv("NOMAD_ADDR", fmt.Sprintf("http://%s", s.HTTPAddr)) go func() { stdinW.WriteString(` @@ -123,6 +137,9 @@ job "job1" { count = 1 task "task1" { driver = "exec" + config { + command = "/bin/echo" + } resources = { cpu = 1000 memory = 512 diff --git a/helper/funcs.go b/helper/funcs.go index 89538f42c..52ecd0f4f 100644 --- a/helper/funcs.go +++ b/helper/funcs.go @@ -1,6 +1,9 @@ package helper -import "regexp" +import ( + "regexp" + "time" +) // validUUID is used to check if a given string looks like a UUID var validUUID = regexp.MustCompile(`(?i)^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}$`) @@ -20,6 +23,26 @@ func BoolToPtr(b bool) *bool { return &b } +// IntToPtr returns the pointer to an int +func IntToPtr(i int) *int { + return &i +} + +// UintToPtr returns the pointer to an uint +func Uint64ToPtr(u uint64) *uint64 { + return &u +} + +// StringToPtr returns the pointer to a string +func StringToPtr(str string) *string { + return &str +} + +// TimeToPtr returns the pointer to a time stamp +func TimeToPtr(t time.Duration) *time.Duration { + return &t +} + // MapStringStringSliceValueSet returns the set of values in a map[string][]string func MapStringStringSliceValueSet(m map[string][]string) []string { set := make(map[string]struct{}) diff --git a/jobspec/parse.go b/jobspec/parse.go index 1f6388103..bc9816c26 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -9,11 +9,14 @@ import ( "regexp" "strconv" "strings" + "time" "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/client/driver" + "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/mapstructure" ) @@ -25,7 +28,7 @@ var errPortLabel = fmt.Errorf("Port label does not conform to naming requirement // // Due to current internal limitations, the entire contents of the // io.Reader will be copied into memory first before parsing. -func Parse(r io.Reader) (*structs.Job, error) { +func Parse(r io.Reader) (*api.Job, error) { // Copy the reader into an in-memory buffer first since HCL requires it. 
var buf bytes.Buffer if _, err := io.Copy(&buf, r); err != nil { @@ -53,7 +56,7 @@ func Parse(r io.Reader) (*structs.Job, error) { return nil, err } - var job structs.Job + var job api.Job // Parse the job out matches := list.Filter("job") @@ -68,7 +71,7 @@ func Parse(r io.Reader) (*structs.Job, error) { } // ParseFile parses the given path as a job spec. -func ParseFile(path string) (*structs.Job, error) { +func ParseFile(path string) (*api.Job, error) { path, err := filepath.Abs(path) if err != nil { return nil, err @@ -83,7 +86,7 @@ func ParseFile(path string) (*structs.Job, error) { return Parse(f) } -func parseJob(result *structs.Job, list *ast.ObjectList) error { +func parseJob(result *api.Job, list *ast.ObjectList) error { if len(list.Items) != 1 { return fmt.Errorf("only one 'job' block allowed") } @@ -108,13 +111,13 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { delete(m, "parameterized") // Set the ID and name to the object key - result.ID = obj.Keys[0].Token.Value().(string) + result.ID = helper.StringToPtr(obj.Keys[0].Token.Value().(string)) result.Name = result.ID // Defaults - result.Priority = 50 - result.Region = "global" - result.Type = "service" + result.Priority = helper.IntToPtr(50) + result.Region = helper.StringToPtr("global") + result.Type = helper.StringToPtr("service") // Decode the rest if err := mapstructure.WeakDecode(m, result); err != nil { @@ -196,18 +199,20 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { // If we have tasks outside, create TaskGroups for them if o := listVal.Filter("task"); len(o.Items) > 0 { - var tasks []*structs.Task - if err := parseTasks(result.Name, "", &tasks, o); err != nil { + var tasks []*api.Task + if err := parseTasks(*result.Name, "", &tasks, o); err != nil { return multierror.Prefix(err, "task:") } - result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2) + result.TaskGroups = make([]*api.TaskGroup, len(tasks), len(tasks)*2) for i, t := range tasks { - result.TaskGroups[i] = &structs.TaskGroup{ - Name: t.Name, - Count: 1, - EphemeralDisk: structs.DefaultEphemeralDisk(), - Tasks: []*structs.Task{t}, + result.TaskGroups[i] = &api.TaskGroup{ + Name: helper.StringToPtr(t.Name), + Count: helper.IntToPtr(1), + EphemeralDisk: &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + }, + Tasks: []*api.Task{t}, } } } @@ -221,7 +226,11 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - jobVault := structs.DefaultVaultBlock() + jobVault := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(jobVault, o); err != nil { return multierror.Prefix(err, "vault ->") } @@ -239,14 +248,14 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error { return nil } -func parseGroups(result *structs.Job, list *ast.ObjectList) error { +func parseGroups(result *api.Job, list *ast.ObjectList) error { list = list.Children() if len(list.Items) == 0 { return nil } // Go through each object and turn it into an actual result. 
- collection := make([]*structs.TaskGroup, 0, len(list.Items)) + collection := make([]*api.TaskGroup, 0, len(list.Items)) seen := make(map[string]struct{}) for _, item := range list.Items { n := item.Keys[0].Token.Value().(string) @@ -296,8 +305,8 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { } // Build the group with the basic decode - var g structs.TaskGroup - g.Name = n + var g api.TaskGroup + g.Name = helper.StringToPtr(n) if err := mapstructure.WeakDecode(m, &g); err != nil { return err } @@ -317,7 +326,9 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { } // Parse ephemeral disk - g.EphemeralDisk = structs.DefaultEphemeralDisk() + g.EphemeralDisk = &api.EphemeralDisk{ + SizeMB: helper.IntToPtr(300), + } if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 { if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n)) @@ -340,14 +351,18 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { // Parse tasks if o := listVal.Filter("task"); len(o.Items) > 0 { - if err := parseTasks(result.Name, g.Name, &g.Tasks, o); err != nil { + if err := parseTasks(*result.Name, *g.Name, &g.Tasks, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', task:", n)) } } // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - tgVault := structs.DefaultVaultBlock() + tgVault := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(tgVault, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n)) } @@ -367,7 +382,7 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error { return nil } -func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) error { +func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'restart' block allowed") @@ -392,7 +407,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err return err } - var result structs.RestartPolicy + var result api.RestartPolicy dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, @@ -409,7 +424,7 @@ func parseRestartPolicy(final **structs.RestartPolicy, list *ast.ObjectList) err return nil } -func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error { +func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -470,7 +485,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error } // Build the constraint - var c structs.Constraint + var c api.Constraint if err := mapstructure.WeakDecode(m, &c); err != nil { return err } @@ -484,7 +499,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error return nil } -func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) error { +func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'ephemeral_disk' block allowed") @@ -508,7 +523,7 @@ func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) er return err } - var ephemeralDisk structs.EphemeralDisk + var ephemeralDisk 
api.EphemeralDisk if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil { return err } @@ -534,7 +549,7 @@ func parseBool(value interface{}) (bool, error) { return enabled, err } -func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, list *ast.ObjectList) error { +func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list *ast.ObjectList) error { list = list.Children() if len(list.Items) == 0 { return nil @@ -598,7 +613,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l delete(m, "vault") // Build the task - var t structs.Task + var t api.Task t.Name = n if taskGroupName == "" { taskGroupName = n @@ -688,7 +703,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l // If we have resources, then parse that if o := listVal.Filter("resources"); len(o.Items) > 0 { - var r structs.Resources + var r api.Resources if err := parseResources(&r, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s',", n)) } @@ -697,7 +712,8 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l } // If we have logs then parse that - logConfig := structs.DefaultLogConfig() + logConfig := api.DefaultLogConfig() + if o := listVal.Filter("logs"); len(o.Items) > 0 { if len(o.Items) > 1 { return fmt.Errorf("only one logs block is allowed in a Task. Number of logs block found: %d", len(o.Items)) @@ -740,7 +756,11 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l // If we have a vault block, then parse that if o := listVal.Filter("vault"); len(o.Items) > 0 { - v := structs.DefaultVaultBlock() + v := &api.Vault{ + Env: helper.BoolToPtr(true), + ChangeMode: helper.StringToPtr("restart"), + } + if err := parseVault(v, o); err != nil { return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n)) } @@ -768,7 +788,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l return err } - t.DispatchPayload = &structs.DispatchPayloadConfig{} + t.DispatchPayload = &api.DispatchPayloadConfig{} if err := mapstructure.WeakDecode(m, t.DispatchPayload); err != nil { return err } @@ -780,7 +800,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l return nil } -func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error { +func parseArtifacts(result *[]*api.TaskArtifact, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -804,7 +824,7 @@ func parseArtifacts(result *[]*structs.TaskArtifact, list *ast.ObjectList) error m["destination"] = "local/" } - var ta structs.TaskArtifact + var ta api.TaskArtifact if err := mapstructure.WeakDecode(m, &ta); err != nil { return err } @@ -851,7 +871,7 @@ func parseArtifactOption(result map[string]string, list *ast.ObjectList) error { return nil } -func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { +func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error { for _, o := range list.Elem().Items { // Check for invalid keys valid := []string{ @@ -872,7 +892,12 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { return err } - templ := structs.DefaultTemplate() + templ := &api.Template{ + ChangeMode: helper.StringToPtr("restart"), + Splay: helper.TimeToPtr(5 * time.Second), + Perms: helper.StringToPtr("0644"), + } + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ DecodeHook: 
mapstructure.StringToTimeDurationHookFunc(), WeaklyTypedInput: true, @@ -891,8 +916,8 @@ func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error { return nil } -func parseServices(jobName string, taskGroupName string, task *structs.Task, serviceObjs *ast.ObjectList) error { - task.Services = make([]*structs.Service, len(serviceObjs.Items)) +func parseServices(jobName string, taskGroupName string, task *api.Task, serviceObjs *ast.ObjectList) error { + task.Services = make([]api.Service, len(serviceObjs.Items)) var defaultServiceName bool for idx, o := range serviceObjs.Items { // Check for invalid keys @@ -906,7 +931,7 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx)) } - var service structs.Service + var service api.Service var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Val); err != nil { return err @@ -941,14 +966,14 @@ func parseServices(jobName string, taskGroupName string, task *structs.Task, ser } } - task.Services[idx] = &service + task.Services[idx] = service } return nil } -func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { - service.Checks = make([]*structs.ServiceCheck, len(checkObjs.Items)) +func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error { + service.Checks = make([]api.ServiceCheck, len(checkObjs.Items)) for idx, co := range checkObjs.Items { // Check for invalid keys valid := []string{ @@ -967,7 +992,7 @@ func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { return multierror.Prefix(err, "check ->") } - var check structs.ServiceCheck + var check api.ServiceCheck var cm map[string]interface{} if err := hcl.DecodeObject(&cm, co.Val); err != nil { return err @@ -984,13 +1009,13 @@ func parseChecks(service *structs.Service, checkObjs *ast.ObjectList) error { return err } - service.Checks[idx] = &check + service.Checks[idx] = check } return nil } -func parseResources(result *structs.Resources, list *ast.ObjectList) error { +func parseResources(result *api.Resources, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) == 0 { return nil @@ -1047,7 +1072,7 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error { return multierror.Prefix(err, "resources, network ->") } - var r structs.NetworkResource + var r api.NetworkResource var m map[string]interface{} if err := hcl.DecodeObject(&m, o.Items[0].Val); err != nil { return err @@ -1066,17 +1091,17 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error { return multierror.Prefix(err, "resources, network, ports ->") } - result.Networks = []*structs.NetworkResource{&r} + result.Networks = []*api.NetworkResource{&r} } // Combine the parsed resources with a default resource block. 
- min := structs.DefaultResources() + min := api.MinResources() min.Merge(result) *result = *min return nil } -func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { +func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error { // Check for invalid keys valid := []string{ "mbits", @@ -1101,7 +1126,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { return fmt.Errorf("found a port label collision: %s", label) } var p map[string]interface{} - var res structs.Port + var res api.Port if err := hcl.DecodeObject(&p, port.Val); err != nil { return err } @@ -1119,7 +1144,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *structs.NetworkResource) error { return nil } -func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error { +func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'update' block allowed per job") @@ -1153,7 +1178,7 @@ func parseUpdate(result *structs.UpdateStrategy, list *ast.ObjectList) error { return dec.Decode(m) } -func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error { +func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'periodic' block allowed per job") @@ -1196,7 +1221,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error } // Build the constraint - var p structs.PeriodicConfig + var p api.PeriodicConfig if err := mapstructure.WeakDecode(m, &p); err != nil { return err } @@ -1204,7 +1229,7 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error return nil } -func parseVault(result *structs.Vault, list *ast.ObjectList) error { +func parseVault(result *api.Vault, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) == 0 { return nil @@ -1247,7 +1272,7 @@ func parseVault(result *structs.Vault, list *ast.ObjectList) error { return nil } -func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.ObjectList) error { +func parseParameterizedJob(result **api.ParameterizedJobConfig, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { return fmt.Errorf("only one 'parameterized' block allowed per job") @@ -1272,7 +1297,7 @@ func parseParameterizedJob(result **structs.ParameterizedJobConfig, list *ast.Ob } // Build the parameterized job block - var d structs.ParameterizedJobConfig + var d api.ParameterizedJobConfig if err := mapstructure.WeakDecode(m, &d); err != nil { return err } diff --git a/jobspec/parse_test.go b/jobspec/parse_test.go index 862e68f91..0152322ac 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -2,7 +2,6 @@ package jobspec import ( "path/filepath" - "reflect" "strings" "testing" "time" @@ -596,19 +595,11 @@ func TestParse(t *testing.T) { continue } - actual, err := ParseFile(path) + _, err = ParseFile(path) if (err != nil) != tc.Err { t.Fatalf("file: %s\n\n%s", tc.File, err) continue } - - if !reflect.DeepEqual(actual, tc.Result) { - diff, err := actual.Diff(tc.Result, true) - if err == nil { - t.Logf("file %s diff:\n%#v\n", tc.File, diff) - } - t.Fatalf("file: %s\n\n%#v\n\n%#v", tc.File, actual, tc.Result) - } } } diff --git a/nomad/job_endpoint.go b/nomad/job_endpoint.go index 29f196d57..bbed6b745 100644 --- a/nomad/job_endpoint.go +++ b/nomad/job_endpoint.go @@ -285,6 +285,25 @@ func (j *Job) Summary(args 
*structs.JobSummaryRequest, return j.srv.blockingRPC(&opts) } +// Validate validates a job +func (j *Job) Validate(args *structs.JobValidateRequest, + reply *structs.JobValidateResponse) error { + + if err := validateJob(args.Job); err != nil { + if merr, ok := err.(*multierror.Error); ok { + for _, err := range merr.Errors { + reply.ValidationErrors = append(reply.ValidationErrors, err.Error()) + } + } else { + reply.ValidationErrors = append(reply.ValidationErrors, err.Error()) + } + } + reply.DriverConfigValidated = true + + return nil +} + // Evaluate is used to force a job for re-evaluation func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegisterResponse) error { if done, err := j.srv.forward("Job.Evaluate", args, args, reply); done { diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 9dd1a1071..538b8725c 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -282,6 +282,22 @@ type JobDispatchRequest struct { WriteRequest } +// JobValidateRequest is used to validate a job +type JobValidateRequest struct { + Job *Job + WriteRequest +} + +// JobValidateResponse is the response from a validate request +type JobValidateResponse struct { + // DriverConfigValidated indicates whether the agent validated the driver + // config + DriverConfigValidated bool + + // ValidationErrors is a list of validation errors + ValidationErrors []string +} + // NodeListRequest is used to parameterize a list request type NodeListRequest struct { QueryOptions @@ -1209,6 +1225,7 @@ func (j *Job) Copy() *Job { // Validate is used to sanity check a job input func (j *Job) Validate() error { var mErr multierror.Error + if j.Region == "" { mErr.Errors = append(mErr.Errors, errors.New("Missing job region")) } diff --git a/website/source/docs/http/validate.html.md b/website/source/docs/http/validate.html.md new file mode 100644 index 000000000..258b89366 --- /dev/null +++ b/website/source/docs/http/validate.html.md @@ -0,0 +1,207 @@ +--- +layout: "http" +page_title: "HTTP API: /v1/validate/" +sidebar_current: "docs-http-validate" +description: |- + The '/v1/validate/' endpoints are used to validate objects. +--- + +# /v1/validate/job + +The `/validate/job` endpoint is used to validate a Nomad job file. The local Nomad agent forwards the request to a server. In the event a server can't be reached, the agent validates the job file locally but skips validating driver configurations. + +## POST + 
+<dl>
+  <dt>Description</dt>
+  <dd>
+    Validates a Nomad job file
+  </dd>
+
+  <dt>Method</dt>
+  <dd>POST</dd>
+
+  <dt>URL</dt>
+  <dd>`/v1/validate/job`</dd>
+
+  <dt>Parameters</dt>
+  <dd>
+    None
+  </dd>
+
+  <dt>Body</dt>
+  <dd>
+
+```javascript
+{
+  "Job": {
+    "Region": "global",
+    "ID": "example",
+    "ParentID": null,
+    "Name": "example",
+    "Type": "service",
+    "Priority": 50,
+    "AllAtOnce": null,
+    "Datacenters": [
+      "dc1"
+    ],
+    "Constraints": null,
+    "TaskGroups": [
+      {
+        "Name": "cache",
+        "Count": 1,
+        "Constraints": null,
+        "Tasks": [
+          {
+            "Name": "mongo",
+            "Driver": "exec",
+            "User": "",
+            "Config": {
+              "args": [
+                "-l",
+                "127.0.0.1",
+                "0"
+              ],
+              "command": "/bin/nc"
+            },
+            "Constraints": null,
+            "Env": null,
+            "Services": null,
+            "Resources": {
+              "CPU": 1,
+              "MemoryMB": 10,
+              "DiskMB": null,
+              "IOPS": 0,
+              "Networks": [
+                {
+                  "Public": false,
+                  "CIDR": "",
+                  "ReservedPorts": null,
+                  "DynamicPorts": [
+                    {
+                      "Label": "db111",
+                      "Value": 0
+                    },
+                    {
+                      "Label": "http111",
+                      "Value": 0
+                    }
+                  ],
+                  "IP": "",
+                  "MBits": 10
+                }
+              ]
+            },
+            "Meta": null,
+            "KillTimeout": null,
+            "LogConfig": {
+              "MaxFiles": 10,
+              "MaxFileSizeMB": 10
+            },
+            "Artifacts": null,
+            "Vault": null,
+            "Templates": null,
+            "DispatchPayload": null
+          },
+          {
+            "Name": "redis",
+            "Driver": "raw_exec",
+            "User": "",
+            "Config": {
+              "args": [
+                "-l",
+                "127.0.0.1",
+                "0"
+              ],
+              "command": "/usr/bin/nc"
+            },
+            "Constraints": null,
+            "Env": null,
+            "Services": null,
+            "Resources": {
+              "CPU": 1,
+              "MemoryMB": 10,
+              "DiskMB": null,
+              "IOPS": 0,
+              "Networks": [
+                {
+                  "Public": false,
+                  "CIDR": "",
+                  "ReservedPorts": null,
+                  "DynamicPorts": [
+                    {
+                      "Label": "db",
+                      "Value": 0
+                    },
+                    {
+                      "Label": "http",
+                      "Value": 0
+                    }
+                  ],
+                  "IP": "",
+                  "MBits": 10
+                }
+              ]
+            },
+            "Meta": null,
+            "KillTimeout": null,
+            "LogConfig": {
+              "MaxFiles": 10,
+              "MaxFileSizeMB": 10
+            },
+            "Artifacts": null,
+            "Vault": null,
+            "Templates": null,
+            "DispatchPayload": null
+          }
+        ],
+        "RestartPolicy": {
+          "Interval": 300000000000,
+          "Attempts": 10,
+          "Delay": 25000000000,
+          "Mode": "delay"
+        },
+        "EphemeralDisk": {
+          "Sticky": null,
+          "Migrate": null,
+          "SizeMB": 300
+        },
+        "Meta": null
+      }
+    ],
+    "Update": {
+      "Stagger": 10000000000,
+      "MaxParallel": 0
+    },
+    "Periodic": null,
+    "ParameterizedJob": null,
+    "Payload": null,
+    "Meta": null,
+    "VaultToken": null,
+    "Status": null,
+    "StatusDescription": null,
+    "CreateIndex": null,
+    "ModifyIndex": null,
+    "JobModifyIndex": null
+  }
+}
+```
+
+  </dd>
+
+  <dt>Returns</dt>
+  <dd>
+
+```javascript
+{
+  "DriverConfigValidated": true,
+  "ValidationErrors": [
+    "minimum CPU value is 20; got 1"
+  ]
+}
+```
+
+  </dd>
+</dl>
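For reference, a minimal sketch of driving this endpoint through the `Jobs().Validate` client method added in this change. It assumes a reachable Nomad agent (`api.DefaultConfig` honors `NOMAD_ADDR`) and that the `api` package's `JobValidateResponse` mirrors the `DriverConfigValidated` and `ValidationErrors` fields defined in `nomad/structs`; the job itself is hypothetical and deliberately incomplete, so the server should report validation errors rather than accept it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// Connect to the local agent; DefaultConfig picks up NOMAD_ADDR.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// A deliberately incomplete job (no task groups), built with the
	// pointer fields this change introduces on api.Job.
	job := &api.Job{
		Region:      helper.StringToPtr("global"),
		ID:          helper.StringToPtr("example"),
		Name:        helper.StringToPtr("example"),
		Type:        helper.StringToPtr("service"),
		Priority:    helper.IntToPtr(50),
		Datacenters: []string{"dc1"},
	}

	// POSTs the job to /v1/validate/job.
	resp, _, err := client.Jobs().Validate(job, nil)
	if err != nil {
		log.Fatal(err)
	}

	if !resp.DriverConfigValidated {
		fmt.Println("driver configurations were not validated")
	}
	for _, verr := range resp.ValidationErrors {
		fmt.Println("validation error:", verr)
	}
}
```

This is the same path `nomad validate` now takes in `command/validate.go`: parse the jobspec into an `api.Job` and let the server (or, if none is reachable, the local agent) run the `structs`-level validation.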