Added tests

parent 7567209857
commit d0d7c92bab
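The hunks below lean heavily on the pointer-conversion helpers in github.com/hashicorp/nomad/helper. For orientation, a minimal sketch of those helpers, with signatures inferred from the call sites in this diff (the real implementations live in the helper package):

    package helper

    import "time"

    // Each helper copies its argument and returns a pointer to the copy,
    // which lets the api structs distinguish "unset" (nil) from
    // "explicitly zero" during canonicalization.
    func IntToPtr(i int) *int                      { return &i }
    func StringToPtr(s string) *string             { return &s }
    func BoolToPtr(b bool) *bool                   { return &b }
    func Uint64ToPtr(u uint64) *uint64             { return &u }
    func TimeToPtr(d time.Duration) *time.Duration { return &d }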
@@ -21,7 +21,7 @@ func TestCompose(t *testing.T) {
         Networks: []*NetworkResource{
             &NetworkResource{
                 CIDR:          "0.0.0.0/0",
-                MBits:         100,
+                MBits:         helper.IntToPtr(100),
                 ReservedPorts: []Port{{"", 80}, {"", 443}},
             },
         },
@@ -83,7 +83,7 @@ func TestCompose(t *testing.T) {
         Networks: []*NetworkResource{
             &NetworkResource{
                 CIDR:  "0.0.0.0/0",
-                MBits: 100,
+                MBits: helper.IntToPtr(100),
                 ReservedPorts: []Port{
                     {"", 80},
                     {"", 443},
@@ -304,6 +304,9 @@ func (j *Job) Canonicalize() {
     if j.Name == nil {
         j.Name = j.ID
     }
+    if j.ParentID == nil {
+        j.ParentID = helper.StringToPtr("")
+    }
 
     if j.Priority == nil {
         j.Priority = helper.IntToPtr(50)
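The hunk above adds a ParentID default alongside the existing Name and Priority defaults. A small usage sketch (the job ID is hypothetical; the expected values follow the TestJobs_Canonicalize cases later in this diff):

    j := &Job{ID: helper.StringToPtr("example")}
    j.Canonicalize()
    fmt.Println(*j.Name)     // "example" — Name falls back to the ID
    fmt.Println(*j.ParentID) // ""        — the new default added here
    fmt.Println(*j.Priority) // 50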
api/jobs_test.go (133 lines changed)
@@ -5,6 +5,7 @@ import (
     "sort"
     "strings"
     "testing"
     "time"
 
+    "github.com/hashicorp/nomad/helper"
     "github.com/hashicorp/nomad/testutil"
@@ -78,6 +79,134 @@ func TestJobs_Validate(t *testing.T) {
     }
 }
 
+func TestJobs_Canonicalize(t *testing.T) {
+
+    testCases := []struct {
+        name     string
+        expected *Job
+        input    *Job
+    }{
+        {
+            name: "empty",
+            input: &Job{
+                TaskGroups: []*TaskGroup{
+                    {
+                        Tasks: []*Task{
+                            {},
+                        },
+                    },
+                },
+            },
+            expected: &Job{
+                ID:                helper.StringToPtr(""),
+                Name:              helper.StringToPtr(""),
+                Region:            helper.StringToPtr("global"),
+                Type:              helper.StringToPtr("service"),
+                ParentID:          helper.StringToPtr(""),
+                Priority:          helper.IntToPtr(50),
+                AllAtOnce:         helper.BoolToPtr(false),
+                VaultToken:        helper.StringToPtr(""),
+                Status:            helper.StringToPtr(""),
+                StatusDescription: helper.StringToPtr(""),
+                CreateIndex:       helper.Uint64ToPtr(0),
+                ModifyIndex:       helper.Uint64ToPtr(0),
+                JobModifyIndex:    helper.Uint64ToPtr(0),
+                TaskGroups: []*TaskGroup{
+                    {
+                        Name:  helper.StringToPtr(""),
+                        Count: helper.IntToPtr(1),
+                        EphemeralDisk: &EphemeralDisk{
+                            Sticky:  helper.BoolToPtr(false),
+                            Migrate: helper.BoolToPtr(false),
+                            SizeMB:  helper.IntToPtr(300),
+                        },
+                        RestartPolicy: &RestartPolicy{
+                            Delay:    helper.TimeToPtr(15 * time.Second),
+                            Attempts: helper.IntToPtr(2),
+                            Interval: helper.TimeToPtr(1 * time.Minute),
+                            Mode:     helper.StringToPtr("delay"),
+                        },
+                        Tasks: []*Task{
+                            {
+                                KillTimeout: helper.TimeToPtr(5 * time.Second),
+                                LogConfig:   DefaultLogConfig(),
+                                Resources:   MinResources(),
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        {
+            name: "partial",
+            input: &Job{
+                Name:     helper.StringToPtr("foo"),
+                ID:       helper.StringToPtr("bar"),
+                ParentID: helper.StringToPtr("lol"),
+                TaskGroups: []*TaskGroup{
+                    {
+                        Name: helper.StringToPtr("bar"),
+                        Tasks: []*Task{
+                            {
+                                Name: "task1",
+                            },
+                        },
+                    },
+                },
+            },
+            expected: &Job{
+                ID:                helper.StringToPtr("bar"),
+                Name:              helper.StringToPtr("foo"),
+                Region:            helper.StringToPtr("global"),
+                Type:              helper.StringToPtr("service"),
+                ParentID:          helper.StringToPtr("lol"),
+                Priority:          helper.IntToPtr(50),
+                AllAtOnce:         helper.BoolToPtr(false),
+                VaultToken:        helper.StringToPtr(""),
+                Status:            helper.StringToPtr(""),
+                StatusDescription: helper.StringToPtr(""),
+                CreateIndex:       helper.Uint64ToPtr(0),
+                ModifyIndex:       helper.Uint64ToPtr(0),
+                JobModifyIndex:    helper.Uint64ToPtr(0),
+                TaskGroups: []*TaskGroup{
+                    {
+                        Name:  helper.StringToPtr("bar"),
+                        Count: helper.IntToPtr(1),
+                        EphemeralDisk: &EphemeralDisk{
+                            Sticky:  helper.BoolToPtr(false),
+                            Migrate: helper.BoolToPtr(false),
+                            SizeMB:  helper.IntToPtr(300),
+                        },
+                        RestartPolicy: &RestartPolicy{
+                            Delay:    helper.TimeToPtr(15 * time.Second),
+                            Attempts: helper.IntToPtr(2),
+                            Interval: helper.TimeToPtr(1 * time.Minute),
+                            Mode:     helper.StringToPtr("delay"),
+                        },
+                        Tasks: []*Task{
+                            {
+                                Name:        "task1",
+                                LogConfig:   DefaultLogConfig(),
+                                Resources:   MinResources(),
+                                KillTimeout: helper.TimeToPtr(5 * time.Second),
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            tc.input.Canonicalize()
+            if !reflect.DeepEqual(tc.input, tc.expected) {
+                t.Fatalf("Name: %v, expected: %#v, actual: %#v", tc.name, tc.expected, tc.input)
+            }
+        })
+    }
+}
+
 func TestJobs_EnforceRegister(t *testing.T) {
     c, s := makeClient(t, nil, nil)
     defer s.Stop()
@@ -174,7 +303,7 @@ func TestJobs_Info(t *testing.T) {
     assertQueryMeta(t, qm)
 
     // Check that the result is what we expect
-    if result == nil || result.ID != job.ID {
+    if result == nil || *result.ID != *job.ID {
         t.Fatalf("expect: %#v, got: %#v", job, result)
     }
 }
@@ -416,7 +545,7 @@ func TestJobs_PeriodicForce(t *testing.T) {
 
     testutil.WaitForResult(func() (bool, error) {
         out, _, err := jobs.Info(*job.ID, nil)
-        if err != nil || out == nil || out.ID != job.ID {
+        if err != nil || out == nil || *out.ID != *job.ID {
             return false, err
         }
         return true, nil
@@ -12,6 +12,12 @@ type Resources struct {
     Networks []*NetworkResource
 }
 
+func (r *Resources) Canonicalize() {
+    for _, n := range r.Networks {
+        n.Canonicalize()
+    }
+}
+
 func MinResources() *Resources {
     return &Resources{
         CPU: helper.IntToPtr(100),
@@ -56,5 +62,11 @@ type NetworkResource struct {
     ReservedPorts []Port
     DynamicPorts  []Port
     IP            string
-    MBits         int
+    MBits         *int
 }
+
+func (n *NetworkResource) Canonicalize() {
+    if n.MBits == nil {
+        n.MBits = helper.IntToPtr(10)
+    }
+}
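With MBits now a *int, Canonicalize can tell an omitted value from an explicit zero. A quick usage sketch (the CIDR value is hypothetical):

    n := &NetworkResource{CIDR: "10.0.0.0/8"}
    n.Canonicalize()
    fmt.Println(*n.MBits) // 10 — the default filled in above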
api/tasks.go (77 lines changed)
@@ -1,6 +1,7 @@
 package api
 
 import (
+    "strings"
     "time"
 
     "github.com/hashicorp/nomad/helper"
@@ -53,10 +54,25 @@ type AllocResourceUsage struct {
 // RestartPolicy defines how the Nomad client restarts
 // tasks in a taskgroup when they fail
 type RestartPolicy struct {
-    Interval time.Duration
-    Attempts int
-    Delay    time.Duration
-    Mode     string
+    Interval *time.Duration
+    Attempts *int
+    Delay    *time.Duration
+    Mode     *string
 }
+
+func (r *RestartPolicy) Merge(rp *RestartPolicy) {
+    if rp.Interval != nil {
+        r.Interval = rp.Interval
+    }
+    if rp.Attempts != nil {
+        r.Attempts = rp.Attempts
+    }
+    if rp.Delay != nil {
+        r.Delay = rp.Delay
+    }
+    if rp.Mode != nil {
+        r.Mode = rp.Mode
+    }
+}
 
 // The ServiceCheck data model represents the consul health check that
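Merge copies only the non-nil fields of its argument, which is what lets TaskGroup.Canonicalize (further down) overlay a partially specified user policy on top of a default one. A sketch of the intended behavior, with hypothetical values:

    // Defaults for a service job, as built in TaskGroup.Canonicalize below.
    def := &RestartPolicy{
        Delay:    helper.TimeToPtr(15 * time.Second),
        Attempts: helper.IntToPtr(2),
        Interval: helper.TimeToPtr(1 * time.Minute),
        Mode:     helper.StringToPtr("delay"),
    }
    // The user set only Attempts; every other field keeps its default.
    user := &RestartPolicy{Attempts: helper.IntToPtr(7)}
    def.Merge(user)
    fmt.Println(*def.Attempts, *def.Delay) // 7 15s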
@@ -145,24 +161,29 @@ func (g *TaskGroup) Canonicalize(jobType string) {
     } else {
         g.EphemeralDisk.Canonicalize()
     }
-    if g.RestartPolicy == nil {
-        switch jobType {
-        case "service", "system":
-            g.RestartPolicy = &RestartPolicy{
-                Delay:    15 * time.Second,
-                Attempts: 2,
-                Interval: 1 * time.Minute,
-                Mode:     "delay",
-            }
-        default:
-            g.RestartPolicy = &RestartPolicy{
-                Delay:    15 * time.Second,
-                Attempts: 15,
-                Interval: 7 * 24 * time.Hour,
-                Mode:     "delay",
-            }
-        }
-    }
+    var defaultRestartPolicy *RestartPolicy
+    switch jobType {
+    case "service", "system":
+        defaultRestartPolicy = &RestartPolicy{
+            Delay:    helper.TimeToPtr(15 * time.Second),
+            Attempts: helper.IntToPtr(2),
+            Interval: helper.TimeToPtr(1 * time.Minute),
+            Mode:     helper.StringToPtr("delay"),
+        }
+    default:
+        defaultRestartPolicy = &RestartPolicy{
+            Delay:    helper.TimeToPtr(15 * time.Second),
+            Attempts: helper.IntToPtr(15),
+            Interval: helper.TimeToPtr(7 * 24 * time.Hour),
+            Mode:     helper.StringToPtr("delay"),
+        }
+    }
+
+    if g.RestartPolicy != nil {
+        defaultRestartPolicy.Merge(g.RestartPolicy)
+    }
+    g.RestartPolicy = defaultRestartPolicy
 }
 
 // Constrain is used to add a constraint to a task group.
@@ -230,7 +251,7 @@ type Task struct {
     Services    []Service
     Resources   *Resources
     Meta        map[string]string
-    KillTimeout time.Duration
+    KillTimeout *time.Duration
     LogConfig   *LogConfig
     Artifacts   []*TaskArtifact
     Vault       *Vault
@@ -255,8 +276,13 @@ func (t *Task) Canonicalize() {
         tmpl.Canonicalize()
     }
 
+    if t.KillTimeout == nil {
+        t.KillTimeout = helper.TimeToPtr(5 * time.Second)
+    }
+
+    min := MinResources()
+    min.Merge(t.Resources)
+    min.Canonicalize()
+    t.Resources = min
 }
 
|
@ -293,6 +319,13 @@ func (tmpl *Template) Canonicalize() {
|
|||
if tmpl.Perms == nil {
|
||||
tmpl.Perms = helper.StringToPtr("0644")
|
||||
}
|
||||
if *tmpl.ChangeMode == "signal" && tmpl.ChangeSignal == nil {
|
||||
tmpl.ChangeSignal = helper.StringToPtr("SIGHUP")
|
||||
}
|
||||
if tmpl.ChangeSignal != nil {
|
||||
sig := *tmpl.ChangeSignal
|
||||
tmpl.ChangeSignal = helper.StringToPtr(strings.ToUpper(sig))
|
||||
}
|
||||
}
|
||||
|
||||
type Vault struct {
|
||||
|
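The new Template.Canonicalize logic defaults ChangeSignal to SIGHUP when ChangeMode is "signal", and upper-cases any signal the user supplied. A sketch (field values hypothetical):

    tmpl := &Template{
        ChangeMode:   helper.StringToPtr("signal"),
        ChangeSignal: helper.StringToPtr("sighup"),
    }
    tmpl.Canonicalize()
    fmt.Println(*tmpl.ChangeSignal) // "SIGHUP"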
@@ -310,7 +343,7 @@ func (v *Vault) Canonicalize() {
         v.ChangeMode = helper.StringToPtr("restart")
     }
     if v.ChangeSignal == nil {
-        v.ChangeSignal = helper.StringToPtr("sighup")
+        v.ChangeSignal = helper.StringToPtr("SIGHUP")
     }
 }
@@ -171,7 +171,7 @@ func TestTask_Require(t *testing.T) {
     Networks: []*NetworkResource{
         &NetworkResource{
             CIDR:          "0.0.0.0/0",
-            MBits:         100,
+            MBits:         helper.IntToPtr(100),
             ReservedPorts: []Port{{"", 80}, {"", 443}},
         },
     },
@@ -127,7 +127,7 @@ func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ
         return nil, CodedError(400, "Job must be specified")
     }
 
-    job := s.apiJobToStructJob(validateRequest.Job)
+    job := apiJobToStructJob(validateRequest.Job)
     args := structs.JobValidateRequest{
         Job: job,
         WriteRequest: structs.WriteRequest{
@@ -142,13 +142,13 @@ func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ
         // Fall back to do local validation
         args.Job.Canonicalize()
         if vErr := args.Job.Validate(); vErr != nil {
-            if merr, ok := err.(*multierror.Error); ok {
-                for _, err := range merr.Errors {
-                    out.ValidationErrors = append(out.ValidationErrors, err.Error())
+            if merr, ok := vErr.(*multierror.Error); ok {
+                for _, e := range merr.Errors {
+                    out.ValidationErrors = append(out.ValidationErrors, e.Error())
                 }
             } else {
                 out.ValidationErrors = append(out.ValidationErrors, vErr.Error())
             }
         }
     }
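The change above fixes a variable mix-up: the old code type-asserted err (the RPC error from the enclosing scope) instead of vErr (the local validation error), so validation errors were never unpacked from the multierror; renaming the loop variable to e also stops it from shadowing the outer err. A minimal illustration of the bug pattern, with hypothetical rpcCall/validate stand-ins:

    if err := rpcCall(); err != nil {
        if vErr := validate(); vErr != nil {
            // Bug: asserts on err, the RPC error, not vErr — this branch
            // can never see the validation multierror.
            if merr, ok := err.(*multierror.Error); ok {
                _ = merr
            }
        }
    }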
@@ -355,13 +355,13 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ
     return out, nil
 }
 
-func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job {
+func apiJobToStructJob(job *api.Job) *structs.Job {
     job.Canonicalize()
 
     j := &structs.Job{
         Region:   *job.Region,
         ID:       *job.ID,
-        ParentID: *job.ID,
+        ParentID: *job.ParentID,
         Name:     *job.Name,
         Type:     *job.Type,
         Priority: *job.Priority,
@@ -374,16 +374,14 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job {
         StatusDescription: *job.StatusDescription,
         CreateIndex:       *job.CreateIndex,
         ModifyIndex:       *job.ModifyIndex,
-        JobModifyIndex:    *job.ModifyIndex,
+        JobModifyIndex:    *job.JobModifyIndex,
     }
 
     j.Constraints = make([]*structs.Constraint, len(job.Constraints))
     for i, c := range job.Constraints {
-        j.Constraints[i] = &structs.Constraint{
-            LTarget: c.LTarget,
-            RTarget: c.RTarget,
-            Operand: c.Operand,
-        }
+        con := &structs.Constraint{}
+        apiConstraintToStructs(c, con)
+        j.Constraints[i] = con
     }
     if job.Update != nil {
         j.Update = structs.UpdateStrategy{
@@ -393,10 +391,12 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job {
     }
     if job.Periodic != nil {
         j.Periodic = &structs.PeriodicConfig{
-            Enabled:         j.Periodic.Enabled,
-            Spec:            j.Periodic.Spec,
-            SpecType:        j.Periodic.SpecType,
-            ProhibitOverlap: j.Periodic.ProhibitOverlap,
+            Enabled:         *job.Periodic.Enabled,
+            SpecType:        *job.Periodic.SpecType,
+            ProhibitOverlap: *job.Periodic.ProhibitOverlap,
         }
+        if job.Periodic.Spec != nil {
+            j.Periodic.Spec = *job.Periodic.Spec
+        }
     }
     if job.ParameterizedJob != nil {
@@ -410,30 +410,28 @@ func (s *HTTPServer) apiJobToStructJob(job *api.Job) *structs.Job {
     j.TaskGroups = make([]*structs.TaskGroup, len(job.TaskGroups))
     for i, taskGroup := range job.TaskGroups {
         tg := &structs.TaskGroup{}
-        s.apiTgToStructsTG(taskGroup, tg)
+        apiTgToStructsTG(taskGroup, tg)
         j.TaskGroups[i] = tg
     }
 
     return j
 }
 
-func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
+func apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.TaskGroup) {
     tg.Name = *taskGroup.Name
     tg.Count = *taskGroup.Count
     tg.Meta = taskGroup.Meta
     tg.Constraints = make([]*structs.Constraint, len(taskGroup.Constraints))
     for k, constraint := range taskGroup.Constraints {
-        tg.Constraints[k] = &structs.Constraint{
-            LTarget: constraint.LTarget,
-            RTarget: constraint.RTarget,
-            Operand: constraint.Operand,
-        }
+        c := &structs.Constraint{}
+        apiConstraintToStructs(constraint, c)
+        tg.Constraints[k] = c
     }
     tg.RestartPolicy = &structs.RestartPolicy{
-        Attempts: taskGroup.RestartPolicy.Attempts,
-        Interval: taskGroup.RestartPolicy.Interval,
-        Delay:    taskGroup.RestartPolicy.Delay,
-        Mode:     taskGroup.RestartPolicy.Mode,
+        Attempts: *taskGroup.RestartPolicy.Attempts,
+        Interval: *taskGroup.RestartPolicy.Interval,
+        Delay:    *taskGroup.RestartPolicy.Delay,
+        Mode:     *taskGroup.RestartPolicy.Mode,
     }
     tg.EphemeralDisk = &structs.EphemeralDisk{
         Sticky: *taskGroup.EphemeralDisk.Sticky,
@@ -444,23 +442,21 @@ func (s *HTTPServer) apiTgToStructsTG(taskGroup *api.TaskGroup, tg *structs.Task
     tg.Tasks = make([]*structs.Task, len(taskGroup.Tasks))
     for l, task := range taskGroup.Tasks {
         t := &structs.Task{}
-        s.apiTaskToStructsTask(task, t)
+        apiTaskToStructsTask(task, t)
         tg.Tasks[l] = t
     }
 }
 
-func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
-    structsTask.Name = apiTask.Driver
+func apiTaskToStructsTask(apiTask *api.Task, structsTask *structs.Task) {
+    structsTask.Name = apiTask.Name
     structsTask.Driver = apiTask.Driver
     structsTask.User = apiTask.User
     structsTask.Config = apiTask.Config
     structsTask.Constraints = make([]*structs.Constraint, len(apiTask.Constraints))
     for i, constraint := range apiTask.Constraints {
-        structsTask.Constraints[i] = &structs.Constraint{
-            LTarget: constraint.LTarget,
-            RTarget: constraint.RTarget,
-            Operand: constraint.Operand,
-        }
+        c := &structs.Constraint{}
+        apiConstraintToStructs(constraint, c)
+        structsTask.Constraints[i] = c
     }
     structsTask.Env = apiTask.Env
     structsTask.Services = make([]*structs.Service, len(apiTask.Services))
@@ -496,10 +492,10 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *struct
         structsTask.Resources.Networks[i] = &structs.NetworkResource{
             CIDR:  nw.CIDR,
             IP:    nw.IP,
-            MBits: nw.MBits,
+            MBits: *nw.MBits,
         }
-        structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].DynamicPorts))
-        structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(structsTask.Resources.Networks[i].ReservedPorts))
+        structsTask.Resources.Networks[i].DynamicPorts = make([]structs.Port, len(nw.DynamicPorts))
+        structsTask.Resources.Networks[i].ReservedPorts = make([]structs.Port, len(nw.ReservedPorts))
         for j, dp := range nw.DynamicPorts {
             structsTask.Resources.Networks[i].DynamicPorts[j] = structs.Port{
                 Label: dp.Label,
@@ -514,7 +510,7 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *struct
         }
     }
     structsTask.Meta = apiTask.Meta
-    structsTask.KillTimeout = apiTask.KillTimeout
+    structsTask.KillTimeout = *apiTask.KillTimeout
     structsTask.LogConfig = &structs.LogConfig{
         MaxFiles:      *apiTask.LogConfig.MaxFiles,
         MaxFileSizeMB: *apiTask.LogConfig.MaxFileSizeMB,
@@ -553,3 +549,9 @@ func (s *HTTPServer) apiTaskToStructsTask(apiTask *api.Task, structsTask *struct
         }
     }
 }
+
+func apiConstraintToStructs(c1 *api.Constraint, c2 *structs.Constraint) {
+    c2.LTarget = c1.LTarget
+    c2.RTarget = c1.RTarget
+    c2.Operand = c1.Operand
+}
@@ -5,8 +5,11 @@ import (
     "net/http/httptest"
     "reflect"
     "testing"
+    "time"
 
     "github.com/golang/snappy"
+    "github.com/hashicorp/nomad/api"
+    "github.com/hashicorp/nomad/helper"
     "github.com/hashicorp/nomad/nomad/mock"
     "github.com/hashicorp/nomad/nomad/structs"
 )
@@ -622,3 +625,355 @@ func TestHTTP_JobDispatch(t *testing.T) {
         }
     })
 }
+
+func TestJobs_ApiJobToStructsJob(t *testing.T) {
+    apiJob := &api.Job{
+        Region:      helper.StringToPtr("global"),
+        ID:          helper.StringToPtr("foo"),
+        ParentID:    helper.StringToPtr("lol"),
+        Name:        helper.StringToPtr("name"),
+        Type:        helper.StringToPtr("service"),
+        Priority:    helper.IntToPtr(50),
+        AllAtOnce:   helper.BoolToPtr(true),
+        Datacenters: []string{"dc1", "dc2"},
+        Constraints: []*api.Constraint{
+            {
+                LTarget: "a",
+                RTarget: "b",
+                Operand: "c",
+            },
+        },
+        Update: &api.UpdateStrategy{
+            Stagger:     1 * time.Second,
+            MaxParallel: 5,
+        },
+        Periodic: &api.PeriodicConfig{
+            Enabled:         helper.BoolToPtr(true),
+            Spec:            helper.StringToPtr("spec"),
+            SpecType:        helper.StringToPtr("cron"),
+            ProhibitOverlap: helper.BoolToPtr(true),
+        },
+        ParameterizedJob: &api.ParameterizedJobConfig{
+            Payload:      "payload",
+            MetaRequired: []string{"a", "b"},
+            MetaOptional: []string{"c", "d"},
+        },
+        Payload: []byte("payload"),
+        Meta: map[string]string{
+            "foo": "bar",
+        },
+        TaskGroups: []*api.TaskGroup{
+            {
+                Name:  helper.StringToPtr("group1"),
+                Count: helper.IntToPtr(5),
+                Constraints: []*api.Constraint{
+                    {
+                        LTarget: "x",
+                        RTarget: "y",
+                        Operand: "z",
+                    },
+                },
+                RestartPolicy: &api.RestartPolicy{
+                    Interval: helper.TimeToPtr(1 * time.Second),
+                    Attempts: helper.IntToPtr(5),
+                    Delay:    helper.TimeToPtr(10 * time.Second),
+                    Mode:     helper.StringToPtr("delay"),
+                },
+                EphemeralDisk: &api.EphemeralDisk{
+                    SizeMB:  helper.IntToPtr(100),
+                    Sticky:  helper.BoolToPtr(true),
+                    Migrate: helper.BoolToPtr(true),
+                },
+                Meta: map[string]string{
+                    "key": "value",
+                },
+                Tasks: []*api.Task{
+                    {
+                        Name:   "task1",
+                        Driver: "docker",
+                        User:   "mary",
+                        Config: map[string]interface{}{
+                            "lol": "code",
+                        },
+                        Env: map[string]string{
+                            "hello": "world",
+                        },
+                        Constraints: []*api.Constraint{
+                            {
+                                LTarget: "x",
+                                RTarget: "y",
+                                Operand: "z",
+                            },
+                        },
+
+                        Services: []api.Service{
+                            {
+                                Id:        "id",
+                                Name:      "serviceA",
+                                Tags:      []string{"1", "2"},
+                                PortLabel: "foo",
+                                Checks: []api.ServiceCheck{
+                                    {
+                                        Id:            "hello",
+                                        Name:          "bar",
+                                        Type:          "http",
+                                        Command:       "foo",
+                                        Args:          []string{"a", "b"},
+                                        Path:          "/check",
+                                        Protocol:      "http",
+                                        PortLabel:     "foo",
+                                        Interval:      4 * time.Second,
+                                        Timeout:       2 * time.Second,
+                                        InitialStatus: "ok",
+                                    },
+                                },
+                            },
+                        },
+                        Resources: &api.Resources{
+                            CPU:      helper.IntToPtr(100),
+                            MemoryMB: helper.IntToPtr(10),
+                            Networks: []*api.NetworkResource{
+                                {
+                                    IP:    "10.10.11.1",
+                                    MBits: helper.IntToPtr(10),
+                                    ReservedPorts: []api.Port{
+                                        {
+                                            Label: "http",
+                                            Value: 80,
+                                        },
+                                    },
+                                    DynamicPorts: []api.Port{
+                                        {
+                                            Label: "ssh",
+                                            Value: 2000,
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                        Meta: map[string]string{
+                            "lol": "code",
+                        },
+                        KillTimeout: helper.TimeToPtr(10 * time.Second),
+                        LogConfig: &api.LogConfig{
+                            MaxFiles:      helper.IntToPtr(10),
+                            MaxFileSizeMB: helper.IntToPtr(100),
+                        },
+                        Artifacts: []*api.TaskArtifact{
+                            {
+                                GetterSource: helper.StringToPtr("source"),
+                                GetterOptions: map[string]string{
+                                    "a": "b",
+                                },
+                                RelativeDest: helper.StringToPtr("dest"),
+                            },
+                        },
+                        Vault: &api.Vault{
+                            Policies:     []string{"a", "b", "c"},
+                            Env:          helper.BoolToPtr(true),
+                            ChangeMode:   helper.StringToPtr("c"),
+                            ChangeSignal: helper.StringToPtr("sighup"),
+                        },
+                        Templates: []*api.Template{
+                            {
+                                SourcePath:   helper.StringToPtr("source"),
+                                DestPath:     helper.StringToPtr("dest"),
+                                EmbeddedTmpl: helper.StringToPtr("embedded"),
+                                ChangeMode:   helper.StringToPtr("change"),
+                                ChangeSignal: helper.StringToPtr("signal"),
+                                Splay:        helper.TimeToPtr(1 * time.Minute),
+                                Perms:        helper.StringToPtr("666"),
+                            },
+                        },
+                        DispatchPayload: &api.DispatchPayloadConfig{
+                            File: "fileA",
+                        },
+                    },
+                },
+            },
+        },
+        VaultToken:        helper.StringToPtr("token"),
+        Status:            helper.StringToPtr("status"),
+        StatusDescription: helper.StringToPtr("status_desc"),
+        CreateIndex:       helper.Uint64ToPtr(1),
+        ModifyIndex:       helper.Uint64ToPtr(3),
+        JobModifyIndex:    helper.Uint64ToPtr(5),
+    }
+
+    expected := &structs.Job{
+        Region:      "global",
+        ID:          "foo",
+        ParentID:    "lol",
+        Name:        "name",
+        Type:        "service",
+        Priority:    50,
+        AllAtOnce:   true,
+        Datacenters: []string{"dc1", "dc2"},
+        Constraints: []*structs.Constraint{
+            {
+                LTarget: "a",
+                RTarget: "b",
+                Operand: "c",
+            },
+        },
+        Update: structs.UpdateStrategy{
+            Stagger:     1 * time.Second,
+            MaxParallel: 5,
+        },
+        Periodic: &structs.PeriodicConfig{
+            Enabled:         true,
+            Spec:            "spec",
+            SpecType:        "cron",
+            ProhibitOverlap: true,
+        },
+        ParameterizedJob: &structs.ParameterizedJobConfig{
+            Payload:      "payload",
+            MetaRequired: []string{"a", "b"},
+            MetaOptional: []string{"c", "d"},
+        },
+        Payload: []byte("payload"),
+        Meta: map[string]string{
+            "foo": "bar",
+        },
+        TaskGroups: []*structs.TaskGroup{
+            {
+                Name:  "group1",
+                Count: 5,
+                Constraints: []*structs.Constraint{
+                    {
+                        LTarget: "x",
+                        RTarget: "y",
+                        Operand: "z",
+                    },
+                },
+                RestartPolicy: &structs.RestartPolicy{
+                    Interval: 1 * time.Second,
+                    Attempts: 5,
+                    Delay:    10 * time.Second,
+                    Mode:     "delay",
+                },
+                EphemeralDisk: &structs.EphemeralDisk{
+                    SizeMB:  100,
+                    Sticky:  true,
+                    Migrate: true,
+                },
+                Meta: map[string]string{
+                    "key": "value",
+                },
+                Tasks: []*structs.Task{
+                    {
+                        Name:   "task1",
+                        Driver: "docker",
+                        User:   "mary",
+                        Config: map[string]interface{}{
+                            "lol": "code",
+                        },
+                        Constraints: []*structs.Constraint{
+                            {
+                                LTarget: "x",
+                                RTarget: "y",
+                                Operand: "z",
+                            },
+                        },
+                        Env: map[string]string{
+                            "hello": "world",
+                        },
+                        Services: []*structs.Service{
+                            &structs.Service{
+                                Name:      "serviceA",
+                                Tags:      []string{"1", "2"},
+                                PortLabel: "foo",
+                                Checks: []*structs.ServiceCheck{
+                                    &structs.ServiceCheck{
+                                        Name:          "bar",
+                                        Type:          "http",
+                                        Command:       "foo",
+                                        Args:          []string{"a", "b"},
+                                        Path:          "/check",
+                                        Protocol:      "http",
+                                        PortLabel:     "foo",
+                                        Interval:      4 * time.Second,
+                                        Timeout:       2 * time.Second,
+                                        InitialStatus: "ok",
+                                    },
+                                },
+                            },
+                        },
+                        Resources: &structs.Resources{
+                            CPU:      100,
+                            MemoryMB: 10,
+                            Networks: []*structs.NetworkResource{
+                                {
+                                    IP:    "10.10.11.1",
+                                    MBits: 10,
+                                    ReservedPorts: []structs.Port{
+                                        {
+                                            Label: "http",
+                                            Value: 80,
+                                        },
+                                    },
+                                    DynamicPorts: []structs.Port{
+                                        {
+                                            Label: "ssh",
+                                            Value: 2000,
+                                        },
+                                    },
+                                },
+                            },
+                        },
+                        Meta: map[string]string{
+                            "lol": "code",
+                        },
+                        KillTimeout: 10 * time.Second,
+                        LogConfig: &structs.LogConfig{
+                            MaxFiles:      10,
+                            MaxFileSizeMB: 100,
+                        },
+                        Artifacts: []*structs.TaskArtifact{
+                            {
+                                GetterSource: "source",
+                                GetterOptions: map[string]string{
+                                    "a": "b",
+                                },
+                                RelativeDest: "dest",
+                            },
+                        },
+                        Vault: &structs.Vault{
+                            Policies:     []string{"a", "b", "c"},
+                            Env:          true,
+                            ChangeMode:   "c",
+                            ChangeSignal: "sighup",
+                        },
+                        Templates: []*structs.Template{
+                            {
+                                SourcePath:   "source",
+                                DestPath:     "dest",
+                                EmbeddedTmpl: "embedded",
+                                ChangeMode:   "change",
+                                ChangeSignal: "SIGNAL",
+                                Splay:        1 * time.Minute,
+                                Perms:        "666",
+                            },
+                        },
+                        DispatchPayload: &structs.DispatchPayloadConfig{
+                            File: "fileA",
+                        },
+                    },
+                },
+            },
+        },
+
+        VaultToken:        "token",
+        Status:            "status",
+        StatusDescription: "status_desc",
+        CreateIndex:       1,
+        ModifyIndex:       3,
+        JobModifyIndex:    5,
+    }
+
+    structsJob := apiJobToStructJob(apiJob)
+
+    if !reflect.DeepEqual(expected, structsJob) {
+        t.Fatalf("bad %#v", structsJob)
+    }
+}
@@ -11,6 +11,8 @@ import (
     "testing"
     "time"
 
+    "github.com/hashicorp/nomad/api"
+    "github.com/hashicorp/nomad/helper"
     "github.com/mitchellh/cli"
 )
@@ -208,8 +210,8 @@ const (
 }`
 )
 
-// Test StructJob with local jobfile
-func TestStructJobWithLocal(t *testing.T) {
+// Test APIJob with local jobfile
+func TestJobGetter_LocalFile(t *testing.T) {
     fh, err := ioutil.TempFile("", "nomad")
     if err != nil {
         t.Fatalf("err: %s", err)
@@ -221,19 +223,52 @@ func TestStructJobWithLocal(t *testing.T) {
     }
 
     j := &JobGetter{}
-    sj, err := j.StructJob(fh.Name())
+    aj, err := j.ApiJob(fh.Name())
     if err != nil {
         t.Fatalf("err: %s", err)
     }
 
-    err = sj.Validate()
-    if err != nil {
-        t.Fatalf("err: %s", err)
-    }
+    expected := &api.Job{
+        ID:          helper.StringToPtr("job1"),
+        Region:      helper.StringToPtr("global"),
+        Priority:    helper.IntToPtr(50),
+        Name:        helper.StringToPtr("job1"),
+        Type:        helper.StringToPtr("service"),
+        Datacenters: []string{"dc1"},
+        TaskGroups: []*api.TaskGroup{
+            {
+                Name:  helper.StringToPtr("group1"),
+                Count: helper.IntToPtr(1),
+                RestartPolicy: &api.RestartPolicy{
+                    Attempts: helper.IntToPtr(10),
+                    Mode:     helper.StringToPtr("delay"),
+                },
+                EphemeralDisk: &api.EphemeralDisk{
+                    SizeMB: helper.IntToPtr(300),
+                },
+
+                Tasks: []*api.Task{
+                    {
+                        Driver: "exec",
+                        Name:   "task1",
+                        Resources: &api.Resources{
+                            CPU:      helper.IntToPtr(100),
+                            MemoryMB: helper.IntToPtr(10),
+                            IOPS:     helper.IntToPtr(0),
+                        },
+                        LogConfig: api.DefaultLogConfig(),
+                    },
+                },
+            },
+        },
+    }
+    if !reflect.DeepEqual(expected, aj) {
+        t.Fatalf("bad: %#v", aj)
+    }
 }
 
 // Test StructJob with jobfile from HTTP Server
-func TestStructJobWithHTTPServer(t *testing.T) {
+func TestAPIJob_HTTPServer(t *testing.T) {
     http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
         fmt.Fprintf(w, job)
     })
@@ -243,13 +278,45 @@ func TestStructJobWithHTTPServer(t *testing.T) {
     time.Sleep(100 * time.Millisecond)
 
     j := &JobGetter{}
-    sj, err := j.StructJob("http://127.0.0.1:12345/")
+    aj, err := j.ApiJob("http://127.0.0.1:12345/")
     if err != nil {
         t.Fatalf("err: %s", err)
     }
-
-    err = sj.Validate()
-    if err != nil {
-        t.Fatalf("err: %s", err)
-    }
+    expected := &api.Job{
+        ID:          helper.StringToPtr("job1"),
+        Region:      helper.StringToPtr("global"),
+        Priority:    helper.IntToPtr(50),
+        Name:        helper.StringToPtr("job1"),
+        Type:        helper.StringToPtr("service"),
+        Datacenters: []string{"dc1"},
+        TaskGroups: []*api.TaskGroup{
+            {
+                Name:  helper.StringToPtr("group1"),
+                Count: helper.IntToPtr(1),
+                RestartPolicy: &api.RestartPolicy{
+                    Attempts: helper.IntToPtr(10),
+                    Mode:     helper.StringToPtr("delay"),
+                },
+                EphemeralDisk: &api.EphemeralDisk{
+                    SizeMB: helper.IntToPtr(300),
+                },
+
+                Tasks: []*api.Task{
+                    {
+                        Driver: "exec",
+                        Name:   "task1",
+                        Resources: &api.Resources{
+                            CPU:      helper.IntToPtr(100),
+                            MemoryMB: helper.IntToPtr(10),
+                            IOPS:     helper.IntToPtr(0),
+                        },
+                        LogConfig: api.DefaultLogConfig(),
+                    },
+                },
+            },
+        },
+    }
+    if !reflect.DeepEqual(expected, aj) {
+        t.Fatalf("bad: %#v", aj)
+    }
 }
@@ -4,6 +4,7 @@ import (
     "testing"
 
     "github.com/hashicorp/nomad/api"
+    "github.com/hashicorp/nomad/helper"
     "github.com/hashicorp/nomad/testutil"
 )
@@ -44,18 +45,18 @@ func testJob(jobID string) *api.Job {
     SetConfig("run_for", "5s").
     SetConfig("exit_code", 0).
     Require(&api.Resources{
-        MemoryMB: 256,
-        CPU:      100,
+        MemoryMB: helper.IntToPtr(256),
+        CPU:      helper.IntToPtr(100),
     }).
     SetLogConfig(&api.LogConfig{
-        MaxFiles:      1,
-        MaxFileSizeMB: 2,
+        MaxFiles:      helper.IntToPtr(1),
+        MaxFileSizeMB: helper.IntToPtr(2),
     })
 
 group := api.NewTaskGroup("group1", 1).
     AddTask(task).
     RequireDisk(&api.EphemeralDisk{
-        SizeMB: 20,
+        SizeMB: helper.IntToPtr(20),
     })
 
 job := api.NewBatchJob(jobID, jobID, "region1", 1).
@@ -3,11 +3,14 @@ package command
 import (
     "fmt"
     "strings"
 
+    "github.com/mitchellh/colorstring"
 )
 
 type ValidateCommand struct {
     Meta
+    JobGetter
+    color *colorstring.Colorize
 }
 
 func (c *ValidateCommand) Help() string {
@@ -62,10 +65,22 @@ func (c *ValidateCommand) Run(args []string) int {
     }
 
     // Check that the job is valid
-    if _, _, err := client.Jobs().Validate(job, nil); err != nil {
+    jr, _, err := client.Jobs().Validate(job, nil)
+    if err != nil {
         c.Ui.Error(fmt.Sprintf("Error validating job: %s", err))
         return 1
     }
+    if jr != nil && !jr.DriverConfigValidated {
+        c.Ui.Output(c.Colorize().Color("[bold][orange]Driver configuration not validated.[reset]"))
+    }
+
+    if jr != nil && len(jr.ValidationErrors) > 0 {
+        c.Ui.Output("Job Validation errors:")
+        for _, err := range jr.ValidationErrors {
+            c.Ui.Output(err)
+        }
+        return 1
+    }
 
     // Done!
     c.Ui.Output("Job validation successful")
@@ -712,10 +712,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list
     }
 
     // If we have logs then parse that
-    logConfig := &api.LogConfig{
-        MaxFiles:      helper.IntToPtr(10),
-        MaxFileSizeMB: helper.IntToPtr(10),
-    }
+    logConfig := api.DefaultLogConfig()
 
     if o := listVal.Filter("logs"); len(o.Items) > 0 {
         if len(o.Items) > 1 {
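api.DefaultLogConfig() replaces the hand-rolled literal. Judging from the literal it replaces here (and from api/jobs_test.go above, which compares against api.DefaultLogConfig()), it presumably returns the same defaults as pointers — a sketch, not the verified implementation:

    func DefaultLogConfig() *LogConfig {
        return &LogConfig{
            MaxFiles:      helper.IntToPtr(10),      // assumed: same default as the removed literal
            MaxFileSizeMB: helper.IntToPtr(10),
        }
    }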