2015-09-08 23:24:26 +00:00
|
|
|
package api
|
|
|
|
|
|
|
|
import (
|
|
|
|
"reflect"
|
2015-09-17 20:15:45 +00:00
|
|
|
"sort"
|
2015-09-09 00:20:52 +00:00
|
|
|
"strings"
|
2015-09-08 23:24:26 +00:00
|
|
|
"testing"
|
2017-02-13 23:18:17 +00:00
|
|
|
"time"
|
2016-01-19 19:09:36 +00:00
|
|
|
|
2019-03-29 18:47:40 +00:00
|
|
|
"github.com/hashicorp/nomad/api/internal/testutil"
|
2017-04-18 02:39:20 +00:00
|
|
|
"github.com/kr/pretty"
|
2018-03-21 17:13:26 +00:00
|
|
|
"github.com/stretchr/testify/require"
|
2015-09-08 23:24:26 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
func TestJobs_Register(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2018-03-21 17:13:26 +00:00
|
|
|
require := require.New(t)
|
|
|
|
|
2015-09-08 23:24:26 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Listing jobs before registering returns nothing
|
2018-04-04 01:24:59 +00:00
|
|
|
resp, _, err := jobs.List(nil)
|
2018-04-05 18:29:39 +00:00
|
|
|
require.Nil(err)
|
2018-03-21 17:13:26 +00:00
|
|
|
require.Emptyf(resp, "expected 0 jobs, got: %d", len(resp))
|
2015-09-08 23:24:26 +00:00
|
|
|
|
|
|
|
// Create a job and attempt to register it
|
2015-09-09 01:42:34 +00:00
|
|
|
job := testJob()
|
2017-05-10 03:52:47 +00:00
|
|
|
resp2, wm, err := jobs.Register(job, nil)
|
2018-03-21 17:13:26 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.NotNil(resp2)
|
|
|
|
require.NotEmpty(resp2.EvalID)
|
2015-09-09 01:42:34 +00:00
|
|
|
assertWriteMeta(t, wm)
|
2015-09-08 23:24:26 +00:00
|
|
|
|
|
|
|
// Query the jobs back out again
|
2018-04-04 01:24:59 +00:00
|
|
|
resp, qm, err := jobs.List(nil)
|
|
|
|
assertQueryMeta(t, qm)
|
2018-03-21 17:13:26 +00:00
|
|
|
require.Nil(err)
|
2015-09-08 23:24:26 +00:00
|
|
|
|
|
|
|
// Check that we got the expected response
|
2017-02-06 19:48:28 +00:00
|
|
|
if len(resp) != 1 || resp[0].ID != *job.ID {
|
2015-09-08 23:24:26 +00:00
|
|
|
t.Fatalf("bad: %#v", resp[0])
|
|
|
|
}
|
|
|
|
}
|
2015-09-09 00:20:52 +00:00
|
|
|
|
2017-02-06 19:48:28 +00:00
|
|
|
func TestJobs_Validate(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2017-02-06 19:48:28 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Create a job and attempt to register it
|
|
|
|
job := testJob()
|
|
|
|
resp, _, err := jobs.Validate(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp.ValidationErrors) != 0 {
|
|
|
|
t.Fatalf("bad %v", resp)
|
|
|
|
}
|
|
|
|
|
|
|
|
job.ID = nil
|
|
|
|
resp1, _, err := jobs.Validate(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(resp1.ValidationErrors) == 0 {
|
|
|
|
t.Fatalf("bad %v", resp1)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-13 23:18:17 +00:00
|
|
|
// TestJobs_Canonicalize is a table-driven test for Job.Canonicalize. Each
// case hands Canonicalize an input job and compares the mutated result
// against a fully spelled-out expected job with reflect.DeepEqual, so the
// expected literals double as documentation of every default value the
// canonicalization step fills in.
func TestJobs_Canonicalize(t *testing.T) {
	t.Parallel()
	testCases := []struct {
		name     string
		expected *Job // the job as it should look after Canonicalize
		input    *Job // the job handed to Canonicalize (mutated in place)
	}{
		{
			// A nearly empty job: every field should come back set to its
			// canonical default ("service" type, global region, etc.).
			name: "empty",
			input: &Job{
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{},
						},
					},
				},
			},
			expected: &Job{
				ID:                stringToPtr(""),
				Name:              stringToPtr(""),
				Region:            stringToPtr("global"),
				Namespace:         stringToPtr(DefaultNamespace),
				Type:              stringToPtr("service"),
				ParentID:          stringToPtr(""),
				Priority:          intToPtr(50),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(30 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(5 * time.Minute),
					ProgressDeadline: timeToPtr(10 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(false),
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr(""),
						Count: intToPtr(1),
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						RestartPolicy: &RestartPolicy{
							Delay:    timeToPtr(15 * time.Second),
							Attempts: intToPtr(2),
							Interval: timeToPtr(30 * time.Minute),
							Mode:     stringToPtr("fail"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(0),
							Interval:      timeToPtr(0),
							DelayFunction: stringToPtr("exponential"),
							Delay:         timeToPtr(30 * time.Second),
							MaxDelay:      timeToPtr(1 * time.Hour),
							Unlimited:     boolToPtr(true),
						},
						Update: &UpdateStrategy{
							Stagger:          timeToPtr(30 * time.Second),
							MaxParallel:      intToPtr(1),
							HealthCheck:      stringToPtr("checks"),
							MinHealthyTime:   timeToPtr(10 * time.Second),
							HealthyDeadline:  timeToPtr(5 * time.Minute),
							ProgressDeadline: timeToPtr(10 * time.Minute),
							AutoRevert:       boolToPtr(false),
							Canary:           intToPtr(0),
							AutoPromote:      boolToPtr(false),
						},
						Migrate: DefaultMigrateStrategy(),
						Tasks: []*Task{
							{
								KillTimeout: timeToPtr(5 * time.Second),
								LogConfig:   DefaultLogConfig(),
								Resources:   DefaultResources(),
							},
						},
					},
				},
			},
		},
		{
			// A batch job: note the batch-specific restart/reschedule
			// defaults, and that batch groups get no Update/Migrate blocks.
			name: "batch",
			input: &Job{
				Type: stringToPtr("batch"),
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{},
						},
					},
				},
			},
			expected: &Job{
				ID:                stringToPtr(""),
				Name:              stringToPtr(""),
				Region:            stringToPtr("global"),
				Namespace:         stringToPtr(DefaultNamespace),
				Type:              stringToPtr("batch"),
				ParentID:          stringToPtr(""),
				Priority:          intToPtr(50),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr(""),
						Count: intToPtr(1),
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						RestartPolicy: &RestartPolicy{
							Delay:    timeToPtr(15 * time.Second),
							Attempts: intToPtr(3),
							Interval: timeToPtr(24 * time.Hour),
							Mode:     stringToPtr("fail"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(1),
							Interval:      timeToPtr(24 * time.Hour),
							DelayFunction: stringToPtr("constant"),
							Delay:         timeToPtr(5 * time.Second),
							MaxDelay:      timeToPtr(0),
							Unlimited:     boolToPtr(false),
						},
						Tasks: []*Task{
							{
								KillTimeout: timeToPtr(5 * time.Second),
								LogConfig:   DefaultLogConfig(),
								Resources:   DefaultResources(),
							},
						},
					},
				},
			},
		},
		{
			// A partially filled job: supplied values (name, namespace,
			// parent ID, group/task names) are preserved while the rest
			// is defaulted.
			name: "partial",
			input: &Job{
				Name:      stringToPtr("foo"),
				Namespace: stringToPtr("bar"),
				ID:        stringToPtr("bar"),
				ParentID:  stringToPtr("lol"),
				TaskGroups: []*TaskGroup{
					{
						Name: stringToPtr("bar"),
						Tasks: []*Task{
							{
								Name: "task1",
							},
						},
					},
				},
			},
			expected: &Job{
				Namespace:         stringToPtr("bar"),
				ID:                stringToPtr("bar"),
				Name:              stringToPtr("foo"),
				Region:            stringToPtr("global"),
				Type:              stringToPtr("service"),
				ParentID:          stringToPtr("lol"),
				Priority:          intToPtr(50),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(30 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(5 * time.Minute),
					ProgressDeadline: timeToPtr(10 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(false),
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr("bar"),
						Count: intToPtr(1),
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						RestartPolicy: &RestartPolicy{
							Delay:    timeToPtr(15 * time.Second),
							Attempts: intToPtr(2),
							Interval: timeToPtr(30 * time.Minute),
							Mode:     stringToPtr("fail"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(0),
							Interval:      timeToPtr(0),
							DelayFunction: stringToPtr("exponential"),
							Delay:         timeToPtr(30 * time.Second),
							MaxDelay:      timeToPtr(1 * time.Hour),
							Unlimited:     boolToPtr(true),
						},
						Update: &UpdateStrategy{
							Stagger:          timeToPtr(30 * time.Second),
							MaxParallel:      intToPtr(1),
							HealthCheck:      stringToPtr("checks"),
							MinHealthyTime:   timeToPtr(10 * time.Second),
							HealthyDeadline:  timeToPtr(5 * time.Minute),
							ProgressDeadline: timeToPtr(10 * time.Minute),
							AutoRevert:       boolToPtr(false),
							Canary:           intToPtr(0),
							AutoPromote:      boolToPtr(false),
						},
						Migrate: DefaultMigrateStrategy(),
						Tasks: []*Task{
							{
								Name:        "task1",
								LogConfig:   DefaultLogConfig(),
								Resources:   DefaultResources(),
								KillTimeout: timeToPtr(5 * time.Second),
							},
						},
					},
				},
			},
		},
		{
			// A realistic example job with services, templates, networks,
			// and a partially specified update strategy that must be merged
			// with the defaults (and inherited by the task group).
			name: "example_template",
			input: &Job{
				ID:          stringToPtr("example_template"),
				Name:        stringToPtr("example_template"),
				Datacenters: []string{"dc1"},
				Type:        stringToPtr("service"),
				Update: &UpdateStrategy{
					MaxParallel: intToPtr(1),
					AutoPromote: boolToPtr(true),
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr("cache"),
						Count: intToPtr(1),
						RestartPolicy: &RestartPolicy{
							Interval: timeToPtr(5 * time.Minute),
							Attempts: intToPtr(10),
							Delay:    timeToPtr(25 * time.Second),
							Mode:     stringToPtr("delay"),
						},
						Update: &UpdateStrategy{
							AutoRevert: boolToPtr(true),
						},
						EphemeralDisk: &EphemeralDisk{
							SizeMB: intToPtr(300),
						},
						Tasks: []*Task{
							{
								Name:   "redis",
								Driver: "docker",
								Config: map[string]interface{}{
									"image": "redis:3.2",
									"port_map": []map[string]int{{
										"db": 6379,
									}},
								},
								Resources: &Resources{
									CPU:      intToPtr(500),
									MemoryMB: intToPtr(256),
									Networks: []*NetworkResource{
										{
											MBits: intToPtr(10),
											DynamicPorts: []Port{
												{
													Label: "db",
												},
											},
										},
									},
								},
								Services: []*Service{
									{
										Name:       "redis-cache",
										Tags:       []string{"global", "cache"},
										CanaryTags: []string{"canary", "global", "cache"},
										PortLabel:  "db",
										Checks: []ServiceCheck{
											{
												Name:     "alive",
												Type:     "tcp",
												Interval: 10 * time.Second,
												Timeout:  2 * time.Second,
											},
										},
									},
								},
								Templates: []*Template{
									{
										EmbeddedTmpl: stringToPtr("---"),
										DestPath:     stringToPtr("local/file.yml"),
									},
									{
										EmbeddedTmpl: stringToPtr("FOO=bar\n"),
										DestPath:     stringToPtr("local/file.env"),
										Envvars:      boolToPtr(true),
									},
								},
							},
						},
					},
				},
			},
			expected: &Job{
				Namespace:         stringToPtr(DefaultNamespace),
				ID:                stringToPtr("example_template"),
				Name:              stringToPtr("example_template"),
				ParentID:          stringToPtr(""),
				Priority:          intToPtr(50),
				Region:            stringToPtr("global"),
				Type:              stringToPtr("service"),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				Datacenters:       []string{"dc1"},
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(30 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(5 * time.Minute),
					ProgressDeadline: timeToPtr(10 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(true),
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr("cache"),
						Count: intToPtr(1),
						RestartPolicy: &RestartPolicy{
							Interval: timeToPtr(5 * time.Minute),
							Attempts: intToPtr(10),
							Delay:    timeToPtr(25 * time.Second),
							Mode:     stringToPtr("delay"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(0),
							Interval:      timeToPtr(0),
							DelayFunction: stringToPtr("exponential"),
							Delay:         timeToPtr(30 * time.Second),
							MaxDelay:      timeToPtr(1 * time.Hour),
							Unlimited:     boolToPtr(true),
						},
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						Update: &UpdateStrategy{
							Stagger:          timeToPtr(30 * time.Second),
							MaxParallel:      intToPtr(1),
							HealthCheck:      stringToPtr("checks"),
							MinHealthyTime:   timeToPtr(10 * time.Second),
							HealthyDeadline:  timeToPtr(5 * time.Minute),
							ProgressDeadline: timeToPtr(10 * time.Minute),
							AutoRevert:       boolToPtr(true),
							Canary:           intToPtr(0),
							AutoPromote:      boolToPtr(true),
						},
						Migrate: DefaultMigrateStrategy(),
						Tasks: []*Task{
							{
								Name:   "redis",
								Driver: "docker",
								Config: map[string]interface{}{
									"image": "redis:3.2",
									"port_map": []map[string]int{{
										"db": 6379,
									}},
								},
								Resources: &Resources{
									CPU:      intToPtr(500),
									MemoryMB: intToPtr(256),
									Networks: []*NetworkResource{
										{
											MBits: intToPtr(10),
											DynamicPorts: []Port{
												{
													Label: "db",
												},
											},
										},
									},
								},
								Services: []*Service{
									{
										Name:        "redis-cache",
										Tags:        []string{"global", "cache"},
										CanaryTags:  []string{"canary", "global", "cache"},
										PortLabel:   "db",
										AddressMode: "auto",
										Checks: []ServiceCheck{
											{
												Name:     "alive",
												Type:     "tcp",
												Interval: 10 * time.Second,
												Timeout:  2 * time.Second,
											},
										},
									},
								},
								KillTimeout: timeToPtr(5 * time.Second),
								LogConfig:   DefaultLogConfig(),
								Templates: []*Template{
									{
										SourcePath:   stringToPtr(""),
										DestPath:     stringToPtr("local/file.yml"),
										EmbeddedTmpl: stringToPtr("---"),
										ChangeMode:   stringToPtr("restart"),
										ChangeSignal: stringToPtr(""),
										Splay:        timeToPtr(5 * time.Second),
										Perms:        stringToPtr("0644"),
										LeftDelim:    stringToPtr("{{"),
										RightDelim:   stringToPtr("}}"),
										Envvars:      boolToPtr(false),
									},
									{
										SourcePath:   stringToPtr(""),
										DestPath:     stringToPtr("local/file.env"),
										EmbeddedTmpl: stringToPtr("FOO=bar\n"),
										ChangeMode:   stringToPtr("restart"),
										ChangeSignal: stringToPtr(""),
										Splay:        timeToPtr(5 * time.Second),
										Perms:        stringToPtr("0644"),
										LeftDelim:    stringToPtr("{{"),
										RightDelim:   stringToPtr("}}"),
										Envvars:      boolToPtr(true),
									},
								},
							},
						},
					},
				},
			},
		},
		{
			// A job with an empty periodic stanza: Canonicalize should
			// enable it, default the spec type to cron, and default the
			// time zone to UTC; the Name is derived from the ID.
			name: "periodic",
			input: &Job{
				ID:       stringToPtr("bar"),
				Periodic: &PeriodicConfig{},
			},
			expected: &Job{
				Namespace:         stringToPtr(DefaultNamespace),
				ID:                stringToPtr("bar"),
				ParentID:          stringToPtr(""),
				Name:              stringToPtr("bar"),
				Region:            stringToPtr("global"),
				Type:              stringToPtr("service"),
				Priority:          intToPtr(50),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(30 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(5 * time.Minute),
					ProgressDeadline: timeToPtr(10 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(false),
				},
				Periodic: &PeriodicConfig{
					Enabled:         boolToPtr(true),
					Spec:            stringToPtr(""),
					SpecType:        stringToPtr(PeriodicSpecCron),
					ProhibitOverlap: boolToPtr(false),
					TimeZone:        stringToPtr("UTC"),
				},
			},
		},
		{
			// Update-strategy merging: group "bar" overrides several fields
			// and keeps the job-level values for the rest, while group "baz"
			// (no Update block) inherits the job-level strategy verbatim.
			name: "update_merge",
			input: &Job{
				Name:     stringToPtr("foo"),
				ID:       stringToPtr("bar"),
				ParentID: stringToPtr("lol"),
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(1 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(6 * time.Minute),
					ProgressDeadline: timeToPtr(7 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(false),
				},
				TaskGroups: []*TaskGroup{
					{
						Name: stringToPtr("bar"),
						Update: &UpdateStrategy{
							Stagger:        timeToPtr(2 * time.Second),
							MaxParallel:    intToPtr(2),
							HealthCheck:    stringToPtr("manual"),
							MinHealthyTime: timeToPtr(1 * time.Second),
							AutoRevert:     boolToPtr(true),
							Canary:         intToPtr(1),
							AutoPromote:    boolToPtr(true),
						},
						Tasks: []*Task{
							{
								Name: "task1",
							},
						},
					},
					{
						Name: stringToPtr("baz"),
						Tasks: []*Task{
							{
								Name: "task1",
							},
						},
					},
				},
			},
			expected: &Job{
				Namespace:         stringToPtr(DefaultNamespace),
				ID:                stringToPtr("bar"),
				Name:              stringToPtr("foo"),
				Region:            stringToPtr("global"),
				Type:              stringToPtr("service"),
				ParentID:          stringToPtr("lol"),
				Priority:          intToPtr(50),
				AllAtOnce:         boolToPtr(false),
				ConsulToken:       stringToPtr(""),
				VaultToken:        stringToPtr(""),
				Stop:              boolToPtr(false),
				Stable:            boolToPtr(false),
				Version:           uint64ToPtr(0),
				Status:            stringToPtr(""),
				StatusDescription: stringToPtr(""),
				CreateIndex:       uint64ToPtr(0),
				ModifyIndex:       uint64ToPtr(0),
				JobModifyIndex:    uint64ToPtr(0),
				Update: &UpdateStrategy{
					Stagger:          timeToPtr(1 * time.Second),
					MaxParallel:      intToPtr(1),
					HealthCheck:      stringToPtr("checks"),
					MinHealthyTime:   timeToPtr(10 * time.Second),
					HealthyDeadline:  timeToPtr(6 * time.Minute),
					ProgressDeadline: timeToPtr(7 * time.Minute),
					AutoRevert:       boolToPtr(false),
					Canary:           intToPtr(0),
					AutoPromote:      boolToPtr(false),
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  stringToPtr("bar"),
						Count: intToPtr(1),
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						RestartPolicy: &RestartPolicy{
							Delay:    timeToPtr(15 * time.Second),
							Attempts: intToPtr(2),
							Interval: timeToPtr(30 * time.Minute),
							Mode:     stringToPtr("fail"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(0),
							Interval:      timeToPtr(0),
							DelayFunction: stringToPtr("exponential"),
							Delay:         timeToPtr(30 * time.Second),
							MaxDelay:      timeToPtr(1 * time.Hour),
							Unlimited:     boolToPtr(true),
						},
						Update: &UpdateStrategy{
							Stagger:          timeToPtr(2 * time.Second),
							MaxParallel:      intToPtr(2),
							HealthCheck:      stringToPtr("manual"),
							MinHealthyTime:   timeToPtr(1 * time.Second),
							HealthyDeadline:  timeToPtr(6 * time.Minute),
							ProgressDeadline: timeToPtr(7 * time.Minute),
							AutoRevert:       boolToPtr(true),
							Canary:           intToPtr(1),
							AutoPromote:      boolToPtr(true),
						},
						Migrate: DefaultMigrateStrategy(),
						Tasks: []*Task{
							{
								Name:        "task1",
								LogConfig:   DefaultLogConfig(),
								Resources:   DefaultResources(),
								KillTimeout: timeToPtr(5 * time.Second),
							},
						},
					},
					{
						Name:  stringToPtr("baz"),
						Count: intToPtr(1),
						EphemeralDisk: &EphemeralDisk{
							Sticky:  boolToPtr(false),
							Migrate: boolToPtr(false),
							SizeMB:  intToPtr(300),
						},
						RestartPolicy: &RestartPolicy{
							Delay:    timeToPtr(15 * time.Second),
							Attempts: intToPtr(2),
							Interval: timeToPtr(30 * time.Minute),
							Mode:     stringToPtr("fail"),
						},
						ReschedulePolicy: &ReschedulePolicy{
							Attempts:      intToPtr(0),
							Interval:      timeToPtr(0),
							DelayFunction: stringToPtr("exponential"),
							Delay:         timeToPtr(30 * time.Second),
							MaxDelay:      timeToPtr(1 * time.Hour),
							Unlimited:     boolToPtr(true),
						},
						Update: &UpdateStrategy{
							Stagger:          timeToPtr(1 * time.Second),
							MaxParallel:      intToPtr(1),
							HealthCheck:      stringToPtr("checks"),
							MinHealthyTime:   timeToPtr(10 * time.Second),
							HealthyDeadline:  timeToPtr(6 * time.Minute),
							ProgressDeadline: timeToPtr(7 * time.Minute),
							AutoRevert:       boolToPtr(false),
							Canary:           intToPtr(0),
							AutoPromote:      boolToPtr(false),
						},
						Migrate: DefaultMigrateStrategy(),
						Tasks: []*Task{
							{
								Name:        "task1",
								LogConfig:   DefaultLogConfig(),
								Resources:   DefaultResources(),
								KillTimeout: timeToPtr(5 * time.Second),
							},
						},
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Canonicalize mutates the input in place; compare against the
			// fully expanded expected job and print a readable diff on
			// mismatch.
			tc.input.Canonicalize()
			if !reflect.DeepEqual(tc.input, tc.expected) {
				t.Fatalf("Name: %v, Diffs:\n%v", tc.name, pretty.Diff(tc.expected, tc.input))
			}
		})
	}
}
|
|
|
|
|
2016-06-08 23:48:02 +00:00
|
|
|
func TestJobs_EnforceRegister(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2018-04-04 01:24:59 +00:00
|
|
|
require := require.New(t)
|
2016-06-08 23:48:02 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Listing jobs before registering returns nothing
|
2018-04-04 01:24:59 +00:00
|
|
|
resp, _, err := jobs.List(nil)
|
|
|
|
require.Nil(err)
|
|
|
|
require.Empty(resp)
|
2016-06-08 23:48:02 +00:00
|
|
|
|
|
|
|
// Create a job and attempt to register it with an incorrect index.
|
|
|
|
job := testJob()
|
2017-09-26 22:26:33 +00:00
|
|
|
resp2, _, err := jobs.EnforceRegister(job, 10, nil)
|
2018-04-04 01:24:59 +00:00
|
|
|
require.NotNil(err)
|
|
|
|
require.Contains(err.Error(), RegisterEnforceIndexErrPrefix)
|
2016-06-08 23:48:02 +00:00
|
|
|
|
|
|
|
// Register
|
2017-09-26 22:26:33 +00:00
|
|
|
resp2, wm, err := jobs.EnforceRegister(job, 0, nil)
|
2018-04-04 01:24:59 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.NotNil(resp2)
|
|
|
|
require.NotZero(resp2.EvalID)
|
2016-06-08 23:48:02 +00:00
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Query the jobs back out again
|
2018-04-04 01:24:59 +00:00
|
|
|
resp, qm, err := jobs.List(nil)
|
|
|
|
require.Nil(err)
|
|
|
|
require.Len(resp, 1)
|
|
|
|
require.Equal(*job.ID, resp[0].ID)
|
2016-06-08 23:48:02 +00:00
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
|
|
|
|
// Fail at incorrect index
|
2018-04-04 01:24:59 +00:00
|
|
|
curIndex := resp[0].JobModifyIndex
|
2017-09-26 22:26:33 +00:00
|
|
|
resp2, _, err = jobs.EnforceRegister(job, 123456, nil)
|
2018-04-04 01:24:59 +00:00
|
|
|
require.NotNil(err)
|
|
|
|
require.Contains(err.Error(), RegisterEnforceIndexErrPrefix)
|
2016-06-08 23:48:02 +00:00
|
|
|
|
|
|
|
// Works at correct index
|
2017-05-10 03:52:47 +00:00
|
|
|
resp3, wm, err := jobs.EnforceRegister(job, curIndex, nil)
|
2018-04-04 01:24:59 +00:00
|
|
|
require.Nil(err)
|
|
|
|
require.NotNil(resp3)
|
|
|
|
require.NotZero(resp3.EvalID)
|
2016-06-08 23:48:02 +00:00
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
}
|
|
|
|
|
2017-04-19 21:57:28 +00:00
|
|
|
func TestJobs_Revert(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2017-04-19 21:57:28 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Register twice
|
|
|
|
job := testJob()
|
2017-05-10 03:52:47 +00:00
|
|
|
resp, wm, err := jobs.Register(job, nil)
|
2017-04-19 21:57:28 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2017-05-10 03:52:47 +00:00
|
|
|
if resp == nil || resp.EvalID == "" {
|
2017-04-19 21:57:28 +00:00
|
|
|
t.Fatalf("missing eval id")
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
2017-07-07 02:08:51 +00:00
|
|
|
job.Meta = map[string]string{"foo": "new"}
|
2017-05-10 03:52:47 +00:00
|
|
|
resp, wm, err = jobs.Register(job, nil)
|
2017-04-19 21:57:28 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2017-05-10 03:52:47 +00:00
|
|
|
if resp == nil || resp.EvalID == "" {
|
2017-04-19 21:57:28 +00:00
|
|
|
t.Fatalf("missing eval id")
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Fail revert at incorrect enforce
|
2019-11-18 21:05:06 +00:00
|
|
|
_, _, err = jobs.Revert(*job.ID, 0, uint64ToPtr(10), nil, "", "")
|
2017-04-19 21:57:28 +00:00
|
|
|
if err == nil || !strings.Contains(err.Error(), "enforcing version") {
|
|
|
|
t.Fatalf("expected enforcement error: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Works at correct index
|
2019-11-18 21:05:06 +00:00
|
|
|
revertResp, wm, err := jobs.Revert(*job.ID, 0, uint64ToPtr(1), nil, "", "")
|
2017-04-19 21:57:28 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if revertResp.EvalID == "" {
|
|
|
|
t.Fatalf("missing eval id")
|
|
|
|
}
|
|
|
|
if revertResp.EvalCreateIndex == 0 {
|
|
|
|
t.Fatalf("bad eval create index")
|
|
|
|
}
|
|
|
|
if revertResp.JobModifyIndex == 0 {
|
|
|
|
t.Fatalf("bad job modify index")
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
}
|
|
|
|
|
2015-09-09 00:49:31 +00:00
|
|
|
func TestJobs_Info(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-09 00:20:52 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Trying to retrieve a job by ID before it exists
|
|
|
|
// returns an error
|
2019-11-07 13:35:39 +00:00
|
|
|
id := "job-id/with\\troublesome:characters\n?&字\000"
|
|
|
|
_, _, err := jobs.Info(id, nil)
|
2015-09-09 00:20:52 +00:00
|
|
|
if err == nil || !strings.Contains(err.Error(), "not found") {
|
|
|
|
t.Fatalf("expected not found error, got: %#v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register the job
|
2015-09-09 01:42:34 +00:00
|
|
|
job := testJob()
|
2019-11-07 13:35:39 +00:00
|
|
|
job.ID = &id
|
2015-09-09 01:42:34 +00:00
|
|
|
_, wm, err := jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
2015-09-09 00:20:52 +00:00
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2015-09-09 01:42:34 +00:00
|
|
|
assertWriteMeta(t, wm)
|
2015-09-09 00:20:52 +00:00
|
|
|
|
|
|
|
// Query the job again and ensure it exists
|
2019-11-07 13:35:39 +00:00
|
|
|
result, qm, err := jobs.Info(id, nil)
|
2015-09-09 00:20:52 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
2015-09-09 01:42:34 +00:00
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
|
|
|
|
// Check that the result is what we expect
|
2017-02-13 23:18:17 +00:00
|
|
|
if result == nil || *result.ID != *job.ID {
|
2015-09-09 00:20:52 +00:00
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, result)
|
|
|
|
}
|
|
|
|
}
|
2015-09-09 00:49:31 +00:00
|
|
|
|
2017-04-13 23:55:21 +00:00
|
|
|
func TestJobs_Versions(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2017-04-13 23:55:21 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Trying to retrieve a job by ID before it exists returns an error
|
2017-07-04 20:08:20 +00:00
|
|
|
_, _, _, err := jobs.Versions("job1", false, nil)
|
2017-04-13 23:55:21 +00:00
|
|
|
if err == nil || !strings.Contains(err.Error(), "not found") {
|
|
|
|
t.Fatalf("expected not found error, got: %#v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register the job
|
|
|
|
job := testJob()
|
|
|
|
_, wm, err := jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Query the job again and ensure it exists
|
2017-07-04 20:08:20 +00:00
|
|
|
result, _, qm, err := jobs.Versions("job1", false, nil)
|
2017-04-13 23:55:21 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
|
|
|
|
// Check that the result is what we expect
|
|
|
|
if len(result) == 0 || *result[0].ID != *job.ID {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, result)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-24 10:46:59 +00:00
|
|
|
func TestJobs_PrefixList(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-12-24 10:46:59 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Listing when nothing exists returns empty
|
2018-03-16 23:46:22 +00:00
|
|
|
results, _, err := jobs.PrefixList("dummy")
|
2015-12-24 10:46:59 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if n := len(results); n != 0 {
|
|
|
|
t.Fatalf("expected 0 jobs, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register the job
|
|
|
|
job := testJob()
|
|
|
|
_, wm, err := jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Query the job again and ensure it exists
|
|
|
|
// Listing when nothing exists returns empty
|
2018-03-16 23:46:22 +00:00
|
|
|
results, _, err = jobs.PrefixList((*job.ID)[:1])
|
2015-12-24 10:46:59 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if we have the right list
|
2017-02-06 19:48:28 +00:00
|
|
|
if len(results) != 1 || results[0].ID != *job.ID {
|
2015-12-24 10:46:59 +00:00
|
|
|
t.Fatalf("bad: %#v", results)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestJobs_List(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-12-24 10:46:59 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Listing when nothing exists returns empty
|
2018-03-16 23:46:22 +00:00
|
|
|
results, _, err := jobs.List(nil)
|
2015-12-24 10:46:59 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if n := len(results); n != 0 {
|
|
|
|
t.Fatalf("expected 0 jobs, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register the job
|
|
|
|
job := testJob()
|
|
|
|
_, wm, err := jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Query the job again and ensure it exists
|
|
|
|
// Listing when nothing exists returns empty
|
2018-03-16 23:46:22 +00:00
|
|
|
results, _, err = jobs.List(nil)
|
2015-12-24 10:46:59 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check if we have the right list
|
2017-02-06 19:48:28 +00:00
|
|
|
if len(results) != 1 || results[0].ID != *job.ID {
|
2015-12-24 10:46:59 +00:00
|
|
|
t.Fatalf("bad: %#v", results)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-09 00:49:31 +00:00
|
|
|
func TestJobs_Allocations(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-09 00:49:31 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
2018-03-12 18:26:37 +00:00
|
|
|
// Looking up by a nonexistent job returns nothing
|
2016-12-20 19:32:17 +00:00
|
|
|
allocs, qm, err := jobs.Allocations("job1", true, nil)
|
2015-09-09 00:49:31 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if qm.LastIndex != 0 {
|
2015-09-09 01:42:34 +00:00
|
|
|
t.Fatalf("bad index: %d", qm.LastIndex)
|
2015-09-09 00:49:31 +00:00
|
|
|
}
|
|
|
|
if n := len(allocs); n != 0 {
|
|
|
|
t.Fatalf("expected 0 allocs, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: do something here to create some allocations for
|
|
|
|
// an existing job, lookup again.
|
|
|
|
}
|
2015-09-09 01:42:34 +00:00
|
|
|
|
|
|
|
func TestJobs_Evaluations(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-09 01:42:34 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
2018-03-12 18:26:37 +00:00
|
|
|
// Looking up by a nonexistent job ID returns nothing
|
2015-09-09 20:18:50 +00:00
|
|
|
evals, qm, err := jobs.Evaluations("job1", nil)
|
2015-09-09 01:42:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
if qm.LastIndex != 0 {
|
|
|
|
t.Fatalf("bad index: %d", qm.LastIndex)
|
|
|
|
}
|
|
|
|
if n := len(evals); n != 0 {
|
|
|
|
t.Fatalf("expected 0 evals, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert a job. This also creates an evaluation so we should
|
|
|
|
// be able to query that out after.
|
|
|
|
job := testJob()
|
2017-05-10 03:52:47 +00:00
|
|
|
resp, wm, err := jobs.Register(job, nil)
|
2015-09-09 01:42:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Look up the evaluations again.
|
2015-09-09 20:18:50 +00:00
|
|
|
evals, qm, err = jobs.Evaluations("job1", nil)
|
2015-09-09 01:42:34 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
|
2016-08-08 03:30:48 +00:00
|
|
|
// Check that we got the evals back, evals are in order most recent to least recent
|
|
|
|
// so the last eval is the original registered eval
|
|
|
|
idx := len(evals) - 1
|
2017-05-10 03:52:47 +00:00
|
|
|
if n := len(evals); n == 0 || evals[idx].ID != resp.EvalID {
|
|
|
|
t.Fatalf("expected >= 1 eval (%s), got: %#v", resp.EvalID, evals[idx])
|
2015-09-09 01:42:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-17 00:16:01 +00:00
|
|
|
// TestJobs_Deregister exercises job deregistration: deleting a
// nonexistent job is a no-op, a soft (non-purge) deregister keeps the
// job queryable, and a purge deregister removes it entirely.
func TestJobs_Deregister(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	jobs := c.Jobs()

	// Register a new job
	job := testJob()
	_, wm, err := jobs.Register(job, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, wm)

	// Attempting delete on a non-existing job is a no-op and does not
	// return an error.
	if _, _, err = jobs.Deregister("nope", false, nil); err != nil {
		t.Fatalf("unexpected error deregistering job: %v", err)
	}

	// Do a soft deregister of an existing job (purge=false)
	evalID, wm3, err := jobs.Deregister("job1", false, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, wm3)
	if evalID == "" {
		t.Fatalf("missing eval ID")
	}

	// Check that the job is still queryable after the soft delete
	out, qm1, err := jobs.Info("job1", nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm1)
	if out == nil {
		t.Fatalf("missing job")
	}

	// Do a purge deregister of an existing job (purge=true)
	evalID, wm4, err := jobs.Deregister("job1", true, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, wm4)
	if evalID == "" {
		t.Fatalf("missing eval ID")
	}

	// Check that the job is really gone
	result, qm, err := jobs.List(nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertQueryMeta(t, qm)
	if n := len(result); n != 0 {
		t.Fatalf("expected 0 jobs, got: %d", n)
	}
}
|
2015-09-10 00:29:43 +00:00
|
|
|
|
2015-09-10 01:39:24 +00:00
|
|
|
func TestJobs_ForceEvaluate(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-10 01:39:24 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
2018-03-12 18:26:37 +00:00
|
|
|
// Force-eval on a non-existent job fails
|
2015-09-10 01:39:24 +00:00
|
|
|
_, _, err := jobs.ForceEvaluate("job1", nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "not found") {
|
|
|
|
t.Fatalf("expected not found error, got: %#v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new job
|
|
|
|
_, wm, err := jobs.Register(testJob(), nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Try force-eval again
|
|
|
|
evalID, wm, err := jobs.ForceEvaluate("job1", nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
// Retrieve the evals and see if we get a matching one
|
|
|
|
evals, qm, err := jobs.Evaluations("job1", nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
for _, eval := range evals {
|
|
|
|
if eval.ID == evalID {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
t.Fatalf("evaluation %q missing", evalID)
|
|
|
|
}
|
|
|
|
|
2016-01-19 19:09:36 +00:00
|
|
|
func TestJobs_PeriodicForce(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2016-01-19 19:09:36 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
2018-03-12 18:26:37 +00:00
|
|
|
// Force-eval on a nonexistent job fails
|
2016-01-19 19:09:36 +00:00
|
|
|
_, _, err := jobs.PeriodicForce("job1", nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "not found") {
|
|
|
|
t.Fatalf("expected not found error, got: %#v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a new job
|
|
|
|
job := testPeriodicJob()
|
|
|
|
_, _, err = jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
2017-02-06 19:48:28 +00:00
|
|
|
out, _, err := jobs.Info(*job.ID, nil)
|
2017-02-13 23:18:17 +00:00
|
|
|
if err != nil || out == nil || *out.ID != *job.ID {
|
2016-01-19 19:09:36 +00:00
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// Try force again
|
2017-02-06 19:48:28 +00:00
|
|
|
evalID, wm, err := jobs.PeriodicForce(*job.ID, nil)
|
2016-01-19 19:09:36 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
|
|
|
if evalID == "" {
|
|
|
|
t.Fatalf("empty evalID")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retrieve the eval
|
|
|
|
evals := c.Evaluations()
|
|
|
|
eval, qm, err := evals.Info(evalID, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
if eval.ID == evalID {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
t.Fatalf("evaluation %q missing", evalID)
|
|
|
|
}
|
|
|
|
|
2016-05-12 01:51:48 +00:00
|
|
|
// TestJobs_Plan exercises the job plan endpoint: a nil job is rejected,
// and planning a registered job returns the expected JobModifyIndex,
// diff, annotations, and created evals — both with and without the
// diff requested.
func TestJobs_Plan(t *testing.T) {
	t.Parallel()
	c, s := makeClient(t, nil, nil)
	defer s.Stop()
	jobs := c.Jobs()

	// Create a job and attempt to register it
	job := testJob()
	resp, wm, err := jobs.Register(job, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if resp == nil || resp.EvalID == "" {
		t.Fatalf("missing eval id")
	}
	assertWriteMeta(t, wm)

	// Check that passing a nil job fails
	if _, _, err := jobs.Plan(nil, true, nil); err == nil {
		t.Fatalf("expect an error when job isn't provided")
	}

	// Make a plan request with the diff requested
	planResp, wm, err := jobs.Plan(job, true, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	if planResp == nil {
		t.Fatalf("nil response")
	}

	if planResp.JobModifyIndex == 0 {
		t.Fatalf("bad JobModifyIndex value: %#v", planResp)
	}
	// diff=true means the server must include a structural diff.
	if planResp.Diff == nil {
		t.Fatalf("got nil diff: %#v", planResp)
	}
	if planResp.Annotations == nil {
		t.Fatalf("got nil annotations: %#v", planResp)
	}
	// Can make this assertion because there are no clients.
	if len(planResp.CreatedEvals) == 0 {
		t.Fatalf("got no CreatedEvals: %#v", planResp)
	}
	assertWriteMeta(t, wm)

	// Make a plan request w/o the diff
	planResp, wm, err = jobs.Plan(job, false, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	assertWriteMeta(t, wm)

	if planResp == nil {
		t.Fatalf("nil response")
	}

	if planResp.JobModifyIndex == 0 {
		t.Fatalf("bad JobModifyIndex value: %d", planResp.JobModifyIndex)
	}
	// diff=false means the server must omit the diff entirely.
	if planResp.Diff != nil {
		t.Fatalf("got non-nil diff: %#v", planResp)
	}
	if planResp.Annotations == nil {
		t.Fatalf("got nil annotations: %#v", planResp)
	}
	// Can make this assertion because there are no clients.
	if len(planResp.CreatedEvals) == 0 {
		t.Fatalf("got no CreatedEvals: %#v", planResp)
	}
}
|
|
|
|
|
2016-07-21 20:34:19 +00:00
|
|
|
func TestJobs_JobSummary(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2016-07-21 20:34:19 +00:00
|
|
|
c, s := makeClient(t, nil, nil)
|
|
|
|
defer s.Stop()
|
|
|
|
jobs := c.Jobs()
|
|
|
|
|
|
|
|
// Trying to retrieve a job summary before the job exists
|
|
|
|
// returns an error
|
|
|
|
_, _, err := jobs.Summary("job1", nil)
|
|
|
|
if err == nil || !strings.Contains(err.Error(), "not found") {
|
|
|
|
t.Fatalf("expected not found error, got: %#v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Register the job
|
|
|
|
job := testJob()
|
2016-08-08 03:51:24 +00:00
|
|
|
taskName := job.TaskGroups[0].Name
|
2016-07-21 20:34:19 +00:00
|
|
|
_, wm, err := jobs.Register(job, nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertWriteMeta(t, wm)
|
|
|
|
|
2016-07-21 21:43:21 +00:00
|
|
|
// Query the job summary again and ensure it exists
|
2016-07-21 20:34:19 +00:00
|
|
|
result, qm, err := jobs.Summary("job1", nil)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %s", err)
|
|
|
|
}
|
|
|
|
assertQueryMeta(t, qm)
|
|
|
|
|
|
|
|
// Check that the result is what we expect
|
2017-02-06 19:48:28 +00:00
|
|
|
if *job.ID != result.JobID {
|
2017-02-28 00:00:19 +00:00
|
|
|
t.Fatalf("err: expected job id of %s saw %s", *job.ID, result.JobID)
|
2016-08-08 03:51:24 +00:00
|
|
|
}
|
2017-02-06 19:48:28 +00:00
|
|
|
if _, ok := result.Summary[*taskName]; !ok {
|
2017-02-28 00:00:19 +00:00
|
|
|
t.Fatalf("err: unable to find %s key in job summary", *taskName)
|
2016-07-21 20:34:19 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-10 00:29:43 +00:00
|
|
|
func TestJobs_NewBatchJob(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2019-05-02 20:00:21 +00:00
|
|
|
job := NewBatchJob("job1", "myjob", "global", 5)
|
2015-09-10 00:29:43 +00:00
|
|
|
expect := &Job{
|
2019-05-02 20:00:21 +00:00
|
|
|
Region: stringToPtr("global"),
|
2019-01-18 18:28:35 +00:00
|
|
|
ID: stringToPtr("job1"),
|
|
|
|
Name: stringToPtr("myjob"),
|
|
|
|
Type: stringToPtr(JobTypeBatch),
|
|
|
|
Priority: intToPtr(5),
|
2015-09-10 00:29:43 +00:00
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(job, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestJobs_NewServiceJob(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2019-05-02 20:00:21 +00:00
|
|
|
job := NewServiceJob("job1", "myjob", "global", 5)
|
2015-09-10 00:29:43 +00:00
|
|
|
expect := &Job{
|
2019-05-02 20:00:21 +00:00
|
|
|
Region: stringToPtr("global"),
|
2019-01-18 18:28:35 +00:00
|
|
|
ID: stringToPtr("job1"),
|
|
|
|
Name: stringToPtr("myjob"),
|
|
|
|
Type: stringToPtr(JobTypeService),
|
|
|
|
Priority: intToPtr(5),
|
2015-09-10 00:29:43 +00:00
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(job, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestJobs_SetMeta(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-10 00:29:43 +00:00
|
|
|
job := &Job{Meta: nil}
|
|
|
|
|
|
|
|
// Initializes a nil map
|
|
|
|
out := job.SetMeta("foo", "bar")
|
|
|
|
if job.Meta == nil {
|
|
|
|
t.Fatalf("should initialize metadata")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the job was returned
|
|
|
|
if job != out {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setting another pair is additive
|
|
|
|
job.SetMeta("baz", "zip")
|
|
|
|
expect := map[string]string{"foo": "bar", "baz": "zip"}
|
|
|
|
if !reflect.DeepEqual(job.Meta, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job.Meta)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestJobs_Constrain(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-10 00:29:43 +00:00
|
|
|
job := &Job{Constraints: nil}
|
|
|
|
|
|
|
|
// Create and add a constraint
|
2015-10-27 21:31:14 +00:00
|
|
|
out := job.Constrain(NewConstraint("kernel.name", "=", "darwin"))
|
2015-09-10 00:29:43 +00:00
|
|
|
if n := len(job.Constraints); n != 1 {
|
|
|
|
t.Fatalf("expected 1 constraint, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the job was returned
|
|
|
|
if job != out {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adding another constraint preserves the original
|
2015-10-27 21:31:14 +00:00
|
|
|
job.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
|
2015-09-10 00:29:43 +00:00
|
|
|
expect := []*Constraint{
|
2017-09-26 22:26:33 +00:00
|
|
|
{
|
2015-09-10 00:29:43 +00:00
|
|
|
LTarget: "kernel.name",
|
|
|
|
RTarget: "darwin",
|
|
|
|
Operand: "=",
|
|
|
|
},
|
2017-09-26 22:26:33 +00:00
|
|
|
{
|
2015-09-10 00:29:43 +00:00
|
|
|
LTarget: "memory.totalbytes",
|
|
|
|
RTarget: "128000000",
|
|
|
|
Operand: ">=",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(job.Constraints, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job.Constraints)
|
|
|
|
}
|
|
|
|
}
|
2015-09-17 20:15:45 +00:00
|
|
|
|
2018-07-16 13:30:58 +00:00
|
|
|
func TestJobs_AddAffinity(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
job := &Job{Affinities: nil}
|
|
|
|
|
|
|
|
// Create and add an affinity
|
|
|
|
out := job.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
|
|
|
|
if n := len(job.Affinities); n != 1 {
|
|
|
|
t.Fatalf("expected 1 affinity, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the job was returned
|
|
|
|
if job != out {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adding another affinity preserves the original
|
|
|
|
job.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
|
|
|
|
expect := []*Affinity{
|
|
|
|
{
|
|
|
|
LTarget: "kernel.version",
|
|
|
|
RTarget: "4.6",
|
|
|
|
Operand: "=",
|
2019-01-30 20:20:38 +00:00
|
|
|
Weight: int8ToPtr(100),
|
2018-07-16 13:30:58 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
LTarget: "${node.datacenter}",
|
|
|
|
RTarget: "dc2",
|
|
|
|
Operand: "=",
|
2019-01-30 20:20:38 +00:00
|
|
|
Weight: int8ToPtr(50),
|
2018-07-16 13:30:58 +00:00
|
|
|
},
|
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(job.Affinities, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job.Affinities)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-09-17 20:15:45 +00:00
|
|
|
func TestJobs_Sort(t *testing.T) {
|
2017-07-21 23:33:04 +00:00
|
|
|
t.Parallel()
|
2015-09-17 20:15:45 +00:00
|
|
|
jobs := []*JobListStub{
|
2017-09-26 22:26:33 +00:00
|
|
|
{ID: "job2"},
|
|
|
|
{ID: "job0"},
|
|
|
|
{ID: "job1"},
|
2015-09-17 20:15:45 +00:00
|
|
|
}
|
|
|
|
sort.Sort(JobIDSort(jobs))
|
|
|
|
|
|
|
|
expect := []*JobListStub{
|
2017-09-26 22:26:33 +00:00
|
|
|
{ID: "job0"},
|
|
|
|
{ID: "job1"},
|
|
|
|
{ID: "job2"},
|
2015-09-17 20:15:45 +00:00
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(jobs, expect) {
|
|
|
|
t.Fatalf("\n\n%#v\n\n%#v", jobs, expect)
|
|
|
|
}
|
|
|
|
}
|
2017-09-12 15:56:55 +00:00
|
|
|
|
2018-07-18 15:53:03 +00:00
|
|
|
func TestJobs_AddSpread(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
job := &Job{Spreads: nil}
|
|
|
|
|
|
|
|
// Create and add a Spread
|
|
|
|
spreadTarget := NewSpreadTarget("r1", 50)
|
|
|
|
|
|
|
|
spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})
|
|
|
|
out := job.AddSpread(spread)
|
|
|
|
if n := len(job.Spreads); n != 1 {
|
|
|
|
t.Fatalf("expected 1 spread, got: %d", n)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that the job was returned
|
|
|
|
if job != out {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", job, out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adding another spread preserves the original
|
|
|
|
spreadTarget2 := NewSpreadTarget("dc1", 100)
|
|
|
|
|
|
|
|
spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})
|
|
|
|
job.AddSpread(spread2)
|
|
|
|
|
|
|
|
expect := []*Spread{
|
|
|
|
{
|
|
|
|
Attribute: "${meta.rack}",
|
2019-01-30 20:20:38 +00:00
|
|
|
Weight: int8ToPtr(100),
|
2018-07-18 15:53:03 +00:00
|
|
|
SpreadTarget: []*SpreadTarget{
|
|
|
|
{
|
|
|
|
Value: "r1",
|
|
|
|
Percent: 50,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Attribute: "${node.datacenter}",
|
2019-01-30 20:20:38 +00:00
|
|
|
Weight: int8ToPtr(100),
|
2018-07-18 15:53:03 +00:00
|
|
|
SpreadTarget: []*SpreadTarget{
|
|
|
|
{
|
|
|
|
Value: "dc1",
|
|
|
|
Percent: 100,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(job.Spreads, expect) {
|
|
|
|
t.Fatalf("expect: %#v, got: %#v", expect, job.Spreads)
|
|
|
|
}
|
|
|
|
}
|