package structs
import (
"fmt"
"os"
"reflect"
"strings"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
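// TestJob_Validate checks that Job.Validate reports every missing required field on an
// empty job, rejects invalid job types and periodic service jobs, and flags duplicate or
// unnamed task groups and empty datacenter names.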
func TestJob_Validate(t *testing.T) {
ci.Parallel(t)
j := &Job{}
err := j.Validate()
requireErrors(t, err,
"datacenters",
"job ID",
"job name",
"job region",
"job type",
"namespace",
"priority",
"task groups",
)
j = &Job{
Type: "invalid-job-type",
}
err = j.Validate()
if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
t.Errorf("expected %s but found: %v", expected, err)
}
j = &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: true,
},
}
err = j.Validate()
require.Error(t, err, "Periodic")
j = &Job{
Region: "global",
ID: uuid.Generate(),
Namespace: "test",
Name: "my-job",
Type: JobTypeService,
Priority: 50,
Datacenters: []string{"dc1"},
TaskGroups: []*TaskGroup{
{
Name: "web",
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
},
},
{
Name: "web",
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
},
},
{
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
},
},
},
}
err = j.Validate()
requireErrors(t, err,
"2 redefines 'web' from group 1",
"group 3 missing name",
"Task group web validation failed",
)
// test for empty datacenters
j = &Job{
Datacenters: []string{""},
}
err = j.Validate()
require.Error(t, err, "datacenter must be non-empty string")
}
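// TestJob_ValidateScaling verifies that a task group's count must fall within the
// min/max bounds of its scaling policy.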
func TestJob_ValidateScaling(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
p := &ScalingPolicy{
Policy: nil, // allowed to be nil
Type: ScalingPolicyTypeHorizontal,
Min: 5,
Max: 5,
Enabled: true,
}
job := testJob()
job.TaskGroups[0].Scaling = p
job.TaskGroups[0].Count = 5
require.NoError(job.Validate())
// min <= max
p.Max = 0
p.Min = 10
err := job.Validate()
requireErrors(t, err,
"task group count must not be less than minimum count in scaling policy",
"task group count must not be greater than maximum count in scaling policy",
)
// count <= max
p.Max = 0
p.Min = 5
job.TaskGroups[0].Count = 5
err = job.Validate()
require.Error(err,
"task group count must not be greater than maximum count in scaling policy",
)
// min <= count
job.TaskGroups[0].Count = 0
p.Min = 5
p.Max = 5
err = job.Validate()
require.Error(err,
"task group count must not be less than minimum count in scaling policy",
)
}
func TestJob_ValidateNullChar(t *testing.T) {
ci.Parallel(t)
assert := assert.New(t)
// job id should not allow null characters
job := testJob()
job.ID = "id_with\000null_character"
assert.Error(job.Validate(), "null character in job ID should not validate")
// job name should not allow null characters
job.ID = "happy_little_job_id"
job.Name = "my job name with \000 characters"
assert.Error(job.Validate(), "null character in job name should not validate")
// task group name should not allow null characters
job.Name = "my job"
job.TaskGroups[0].Name = "oh_no_another_\000_char"
assert.Error(job.Validate(), "null character in task group name should not validate")
// task name should not allow null characters
job.TaskGroups[0].Name = "so_much_better"
job.TaskGroups[0].Tasks[0].Name = "ive_had_it_with_these_\000_chars_in_these_names"
assert.Error(job.Validate(), "null character in task name should not validate")
}
func TestJob_Warnings(t *testing.T) {
ci.Parallel(t)
cases := []struct {
Name string
Job *Job
Expected []string
}{
{
Name: "Higher counts for update stanza",
Expected: []string{"max parallel count is greater"},
Job: &Job{
Type: JobTypeService,
TaskGroups: []*TaskGroup{
{
Name: "foo",
Count: 2,
Update: &UpdateStrategy{
MaxParallel: 10,
},
},
},
},
},
{
Name: "AutoPromote mixed TaskGroups",
Expected: []string{"auto_promote must be true for all groups"},
Job: &Job{
Type: JobTypeService,
TaskGroups: []*TaskGroup{
{
Update: &UpdateStrategy{
AutoPromote: true,
},
},
{
Update: &UpdateStrategy{
AutoPromote: false,
Canary: 1,
},
},
},
},
},
{
Name: "no error for mixed but implied AutoPromote",
Expected: []string{},
Job: &Job{
Type: JobTypeService,
TaskGroups: []*TaskGroup{
{
Update: &UpdateStrategy{
AutoPromote: true,
},
},
{
Update: &UpdateStrategy{
AutoPromote: false,
Canary: 0,
},
},
},
},
},
{
Name: "Template.VaultGrace Deprecated",
Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
Job: &Job{
Type: JobTypeService,
TaskGroups: []*TaskGroup{
{
Tasks: []*Task{
{
Templates: []*Template{
{
VaultGrace: 1,
},
},
},
},
},
},
},
},
}
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
warnings := c.Job.Warnings()
if warnings == nil {
if len(c.Expected) == 0 {
return
}
t.Fatal("Got no warnings when they were expected")
}
a := warnings.Error()
for _, e := range c.Expected {
if !strings.Contains(a, e) {
t.Fatalf("Got warnings %q; didn't contain %q", a, e)
}
}
})
}
}
func TestJob_SpecChanged(t *testing.T) {
ci.Parallel(t)
// Get a base test job
base := testJob()
// Only modify the indexes/mutable state of the job
mutatedBase := base.Copy()
mutatedBase.Status = "foo"
mutatedBase.ModifyIndex = base.ModifyIndex + 100
// changed contains a spec change that should be detected
change := base.Copy()
change.Priority = 99
cases := []struct {
Name string
Original *Job
New *Job
Changed bool
}{
{
Name: "Same job except mutable indexes",
Changed: false,
Original: base,
New: mutatedBase,
},
{
Name: "Different",
Changed: true,
Original: base,
New: change,
},
{
Name: "With Constraints",
Changed: false,
Original: &Job{Constraints: []*Constraint{{"A", "B", "="}}},
New: &Job{Constraints: []*Constraint{{"A", "B", "="}}},
},
{
Name: "With Affinities",
Changed: false,
Original: &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
New: &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
},
}
for _, c := range cases {
t.Run(c.Name, func(t *testing.T) {
if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
}
})
}
}
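// testJob returns a fully populated service job with a single task group and exec task,
// used as the base fixture for many tests in this file.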
func testJob() *Job {
return &Job{
Region: "global",
ID: uuid.Generate(),
Namespace: "test",
Name: "my-job",
Type: JobTypeService,
Priority: 50,
AllAtOnce: false,
Datacenters: []string{"dc1"},
Constraints: []*Constraint{
{
LTarget: "$attr.kernel.name",
RTarget: "linux",
Operand: "=",
},
},
Periodic: &PeriodicConfig{
Enabled: false,
},
TaskGroups: []*TaskGroup{
{
Name: "web",
Count: 10,
EphemeralDisk: DefaultEphemeralDisk(),
RestartPolicy: &RestartPolicy{
Mode: RestartPolicyModeFail,
Attempts: 3,
Interval: 10 * time.Minute,
Delay: 1 * time.Minute,
},
ReschedulePolicy: &ReschedulePolicy{
Interval: 5 * time.Minute,
Attempts: 10,
Delay: 5 * time.Second,
DelayFunction: "constant",
},
Networks: []*NetworkResource{
{
DynamicPorts: []Port{
{Label: "http"},
},
},
},
Services: []*Service{
{
Name: "${TASK}-frontend",
PortLabel: "http",
Provider: "consul",
},
},
Tasks: []*Task{
{
Name: "web",
Driver: "exec",
Config: map[string]interface{}{
"command": "/bin/date",
},
Env: map[string]string{
"FOO": "bar",
},
Artifacts: []*TaskArtifact{
{
GetterSource: "http://foo.com",
},
},
Resources: &Resources{
CPU: 500,
MemoryMB: 256,
},
LogConfig: &LogConfig{
MaxFiles: 10,
MaxFileSizeMB: 1,
},
},
},
Meta: map[string]string{
"elb_check_type": "http",
"elb_check_interval": "30s",
"elb_check_min": "3",
},
},
},
Meta: map[string]string{
"owner": "armon",
},
}
}
func TestJob_Copy(t *testing.T) {
ci.Parallel(t)
j := testJob()
c := j.Copy()
if !reflect.DeepEqual(j, c) {
t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
}
}
func TestJob_IsPeriodic(t *testing.T) {
ci.Parallel(t)
j := &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: true,
},
}
if !j.IsPeriodic() {
t.Fatalf("IsPeriodic() returned false on periodic job")
}
j = &Job{
Type: JobTypeService,
}
if j.IsPeriodic() {
t.Fatalf("IsPeriodic() returned true on non-periodic job")
}
}
func TestJob_IsPeriodicActive(t *testing.T) {
ci.Parallel(t)
cases := []struct {
job *Job
active bool
}{
{
job: &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: true,
},
},
active: true,
},
{
job: &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: false,
},
},
active: false,
},
{
job: &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: true,
},
Stop: true,
},
active: false,
},
{
job: &Job{
Type: JobTypeService,
Periodic: &PeriodicConfig{
Enabled: false,
},
ParameterizedJob: &ParameterizedJobConfig{},
},
active: false,
},
}
for i, c := range cases {
if act := c.job.IsPeriodicActive(); act != c.active {
t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
}
}
}
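// TestJob_SystemJob_Validate checks count limits for system jobs and that affinity and
// spread stanzas are rejected for them.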
func TestJob_SystemJob_Validate(t *testing.T) {
j := testJob()
j.Type = JobTypeSystem
j.TaskGroups[0].ReschedulePolicy = nil
j.Canonicalize()
err := j.Validate()
if err == nil || !strings.Contains(err.Error(), "exceed") {
t.Fatalf("expect error due to count")
}
j.TaskGroups[0].Count = 0
if err := j.Validate(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
j.TaskGroups[0].Count = 1
if err := j.Validate(); err != nil {
t.Fatalf("unexpected err: %v", err)
}
// Add affinities at the job, task group, and task level, which should fail validation
j.Affinities = []*Affinity{{
Operand: "=",
LTarget: "${node.datacenter}",
RTarget: "dc1",
}}
j.TaskGroups[0].Affinities = []*Affinity{{
Operand: "=",
LTarget: "${meta.rack}",
RTarget: "r1",
}}
j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
Operand: "=",
LTarget: "${meta.rack}",
RTarget: "r1",
}}
err = j.Validate()
require.NotNil(t, err)
require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")
// Add spread at the job and task group level, which should fail validation
j.Spreads = []*Spread{{
Attribute: "${node.datacenter}",
Weight: 100,
}}
j.TaskGroups[0].Spreads = []*Spread{{
Attribute: "${node.datacenter}",
Weight: 100,
}}
err = j.Validate()
require.NotNil(t, err)
require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
}
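// TestJob_Vault checks that Job.Vault returns each task's Vault block keyed by task
// group and task name.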
func TestJob_Vault(t *testing.T) {
ci.Parallel(t)
j0 := &Job{}
e0 := make(map[string]map[string]*Vault, 0)
vj1 := &Vault{
Policies: []string{
"p1",
"p2",
},
}
vj2 := &Vault{
Policies: []string{
"p3",
"p4",
},
}
vj3 := &Vault{
Policies: []string{
"p5",
},
EntityAlias: "alias1",
}
j1 := &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
Tasks: []*Task{
{
Name: "t1",
},
{
Name: "t2",
Vault: vj1,
},
},
},
{
Name: "bar",
Tasks: []*Task{
{
Name: "t3",
Vault: vj2,
},
{
Name: "t4",
Vault: vj3,
},
},
},
},
}
e1 := map[string]map[string]*Vault{
"foo": {
"t2": vj1,
},
"bar": {
"t3": vj2,
"t4": vj3,
},
}
cases := []struct {
Job *Job
Expected map[string]map[string]*Vault
}{
{
Job: j0,
Expected: e0,
},
{
Job: j1,
Expected: e1,
},
}
for i, c := range cases {
got := c.Job.Vault()
if !reflect.DeepEqual(got, c.Expected) {
t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
}
}
}
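// TestJob_ConnectTasks checks that Job.ConnectTasks returns the task kinds of all
// Connect proxy, ingress, terminating, and native tasks across task groups.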
func TestJob_ConnectTasks(t *testing.T) {
ci.Parallel(t)
r := require.New(t)
j0 := &Job{
TaskGroups: []*TaskGroup{{
Name: "tg1",
Tasks: []*Task{{
Name: "connect-proxy-task1",
Kind: "connect-proxy:task1",
}, {
Name: "task2",
Kind: "task2",
}, {
Name: "connect-proxy-task3",
Kind: "connect-proxy:task3",
}},
}, {
Name: "tg2",
Tasks: []*Task{{
Name: "task1",
Kind: "task1",
}, {
Name: "connect-proxy-task2",
Kind: "connect-proxy:task2",
}},
}, {
Name: "tg3",
Tasks: []*Task{{
Name: "ingress",
Kind: "connect-ingress:ingress",
}},
}, {
Name: "tg4",
Tasks: []*Task{{
Name: "frontend",
Kind: "connect-native:uuid-fe",
}, {
Name: "generator",
Kind: "connect-native:uuid-api",
}},
}, {
Name: "tg5",
Tasks: []*Task{{
Name: "t1000",
Kind: "connect-terminating:t1000",
}},
}},
}
connectTasks := j0.ConnectTasks()
exp := []TaskKind{
NewTaskKind(ConnectProxyPrefix, "task1"),
NewTaskKind(ConnectProxyPrefix, "task3"),
NewTaskKind(ConnectProxyPrefix, "task2"),
NewTaskKind(ConnectIngressPrefix, "ingress"),
NewTaskKind(ConnectNativePrefix, "uuid-fe"),
NewTaskKind(ConnectNativePrefix, "uuid-api"),
NewTaskKind(ConnectTerminatingPrefix, "t1000"),
}
r.Equal(exp, connectTasks)
}
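// TestJob_RequiredSignals checks that Job.RequiredSignals collects the signals required
// by Vault blocks, templates, and kill_signal settings, keyed by task group and task.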
func TestJob_RequiredSignals(t *testing.T) {
ci.Parallel(t)
j0 := &Job{}
e0 := make(map[string]map[string][]string, 0)
vj1 := &Vault{
Policies: []string{"p1"},
ChangeMode: VaultChangeModeNoop,
}
vj2 := &Vault{
Policies: []string{"p1"},
ChangeMode: VaultChangeModeSignal,
ChangeSignal: "SIGUSR1",
}
tj1 := &Template{
SourcePath: "foo",
DestPath: "bar",
ChangeMode: TemplateChangeModeNoop,
}
tj2 := &Template{
SourcePath: "foo",
DestPath: "bar",
ChangeMode: TemplateChangeModeSignal,
ChangeSignal: "SIGUSR2",
}
j1 := &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
Tasks: []*Task{
{
Name: "t1",
},
{
Name: "t2",
Vault: vj2,
Templates: []*Template{tj2},
},
},
},
{
Name: "bar",
Tasks: []*Task{
{
Name: "t3",
Vault: vj1,
Templates: []*Template{tj1},
},
{
Name: "t4",
Vault: vj2,
},
},
},
},
}
e1 := map[string]map[string][]string{
"foo": {
"t2": {"SIGUSR1", "SIGUSR2"},
},
"bar": {
"t4": {"SIGUSR1"},
},
}
j2 := &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
Tasks: []*Task{
{
Name: "t1",
KillSignal: "SIGQUIT",
},
},
},
},
}
e2 := map[string]map[string][]string{
"foo": {
"t1": {"SIGQUIT"},
},
}
cases := []struct {
Job *Job
Expected map[string]map[string][]string
}{
{
Job: j0,
Expected: e0,
},
{
Job: j1,
Expected: e1,
},
{
Job: j2,
Expected: e2,
},
}
for i, c := range cases {
got := c.Job.RequiredSignals()
if !reflect.DeepEqual(got, c.Expected) {
t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
}
}
}
// TestJob_PartEqual tests the Equals comparisons for components of Jobs.
func TestJob_PartEqual(t *testing.T) {
ci.Parallel(t)
ns := &Networks{}
require.True(t, ns.Equals(&Networks{}))
ns = &Networks{
&NetworkResource{Device: "eth0"},
}
require.True(t, ns.Equals(&Networks{
&NetworkResource{Device: "eth0"},
}))
ns = &Networks{
&NetworkResource{Device: "eth0"},
&NetworkResource{Device: "eth1"},
&NetworkResource{Device: "eth2"},
}
require.True(t, ns.Equals(&Networks{
&NetworkResource{Device: "eth2"},
&NetworkResource{Device: "eth0"},
&NetworkResource{Device: "eth1"},
}))
cs := &Constraints{
&Constraint{"left0", "right0", "="},
&Constraint{"left1", "right1", "="},
&Constraint{"left2", "right2", "="},
}
require.True(t, cs.Equals(&Constraints{
&Constraint{"left0", "right0", "="},
&Constraint{"left2", "right2", "="},
&Constraint{"left1", "right1", "="},
}))
as := &Affinities{
&Affinity{"left0", "right0", "=", 0},
&Affinity{"left1", "right1", "=", 0},
&Affinity{"left2", "right2", "=", 0},
}
require.True(t, as.Equals(&Affinities{
&Affinity{"left0", "right0", "=", 0},
&Affinity{"left2", "right2", "=", 0},
&Affinity{"left1", "right1", "=", 0},
}))
}
func TestTask_UsesConnect(t *testing.T) {
ci.Parallel(t)
t.Run("normal task", func(t *testing.T) {
task := testJob().TaskGroups[0].Tasks[0]
usesConnect := task.UsesConnect()
require.False(t, usesConnect)
})
t.Run("sidecar proxy", func(t *testing.T) {
task := &Task{
Name: "connect-proxy-task1",
Kind: NewTaskKind(ConnectProxyPrefix, "task1"),
}
usesConnect := task.UsesConnect()
require.True(t, usesConnect)
})
t.Run("native task", func(t *testing.T) {
task := &Task{
Name: "task1",
Kind: NewTaskKind(ConnectNativePrefix, "task1"),
}
usesConnect := task.UsesConnect()
require.True(t, usesConnect)
})
t.Run("ingress gateway", func(t *testing.T) {
task := &Task{
Name: "task1",
Kind: NewTaskKind(ConnectIngressPrefix, "task1"),
}
usesConnect := task.UsesConnect()
require.True(t, usesConnect)
})
t.Run("terminating gateway", func(t *testing.T) {
task := &Task{
Name: "task1",
Kind: NewTaskKind(ConnectTerminatingPrefix, "task1"),
}
usesConnect := task.UsesConnect()
require.True(t, usesConnect)
})
}
func TestTaskGroup_UsesConnect(t *testing.T) {
ci.Parallel(t)
try := func(t *testing.T, tg *TaskGroup, exp bool) {
result := tg.UsesConnect()
require.Equal(t, exp, result)
}
t.Run("tg uses native", func(t *testing.T) {
try(t, &TaskGroup{
Services: []*Service{
{Connect: nil},
{Connect: &ConsulConnect{Native: true}},
},
}, true)
})
t.Run("tg uses sidecar", func(t *testing.T) {
try(t, &TaskGroup{
Services: []*Service{{
Connect: &ConsulConnect{
SidecarService: &ConsulSidecarService{
Port: "9090",
},
},
}},
}, true)
})
t.Run("tg uses gateway", func(t *testing.T) {
try(t, &TaskGroup{
Services: []*Service{{
Connect: &ConsulConnect{
Gateway: consulIngressGateway1,
},
}},
}, true)
})
t.Run("tg does not use connect", func(t *testing.T) {
try(t, &TaskGroup{
Services: []*Service{
{Connect: nil},
},
}, false)
})
}
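// TestTaskGroup_Validate covers task group validation: counts, restart and reschedule
// policies, duplicate task names and leaders, reserved port collisions, update blocks,
// volumes and volume mounts, and service provider consistency.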
func TestTaskGroup_Validate(t *testing.T) {
ci.Parallel(t)
j := testJob()
tg := &TaskGroup{
Count: -1,
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
Mode: RestartPolicyModeDelay,
},
ReschedulePolicy: &ReschedulePolicy{
Interval: 5 * time.Minute,
Attempts: 5,
Delay: 5 * time.Second,
},
}
err := tg.Validate(j)
requireErrors(t, err,
"group name",
"count can't be negative",
"Missing tasks",
)
tg = &TaskGroup{
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{
Networks: []*NetworkResource{
{
ReservedPorts: []Port{{Label: "foo", Value: 123}},
},
},
},
},
{
Name: "task-b",
Resources: &Resources{
Networks: []*NetworkResource{
{
ReservedPorts: []Port{{Label: "foo", Value: 123}},
},
},
},
},
},
}
err = tg.Validate(&Job{})
expected := `Static port 123 already reserved by task-a:foo`
if !strings.Contains(err.Error(), expected) {
t.Errorf("expected %s but found: %v", expected, err)
}
tg = &TaskGroup{
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{
Networks: []*NetworkResource{
{
ReservedPorts: []Port{
{Label: "foo", Value: 123},
{Label: "bar", Value: 123},
},
},
},
},
},
},
}
err = tg.Validate(&Job{})
expected = `Static port 123 already reserved by task-a:foo`
if !strings.Contains(err.Error(), expected) {
t.Errorf("expected %s but found: %v", expected, err)
}
tg = &TaskGroup{
Name: "web",
Count: 1,
Tasks: []*Task{
{Name: "web", Leader: true},
{Name: "web", Leader: true},
{},
},
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
Mode: RestartPolicyModeDelay,
},
ReschedulePolicy: &ReschedulePolicy{
Interval: 5 * time.Minute,
Attempts: 10,
Delay: 5 * time.Second,
DelayFunction: "constant",
},
}
err = tg.Validate(j)
requireErrors(t, err,
"should have an ephemeral disk object",
"2 redefines 'web' from task 1",
"Task 3 missing name",
"Only one task may be marked as leader",
"Task web validation failed",
)
tg = &TaskGroup{
Name: "web",
Count: 1,
Tasks: []*Task{
{Name: "web", Leader: true},
},
Update: DefaultUpdateStrategy.Copy(),
}
j.Type = JobTypeBatch
err = tg.Validate(j)
require.Error(t, err, "does not allow update block")
tg = &TaskGroup{
Count: -1,
RestartPolicy: &RestartPolicy{
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
Attempts: 10,
Mode: RestartPolicyModeDelay,
},
ReschedulePolicy: &ReschedulePolicy{
Interval: 5 * time.Minute,
Attempts: 5,
Delay: 5 * time.Second,
},
}
j.Type = JobTypeSystem
err = tg.Validate(j)
if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
t.Fatalf("err: %s", err)
}
tg = &TaskGroup{
Networks: []*NetworkResource{
{
DynamicPorts: []Port{{"http", 0, 80, ""}},
},
},
Tasks: []*Task{
{
Resources: &Resources{
Networks: []*NetworkResource{
{
DynamicPorts: []Port{{"http", 0, 80, ""}},
},
},
},
},
},
}
err = tg.Validate(j)
require.Contains(t, err.Error(), "Port label http already in use")
tg = &TaskGroup{
Volumes: map[string]*VolumeRequest{
"foo": {
Type: "nothost",
Source: "foo",
},
},
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{},
},
},
}
err = tg.Validate(&Job{})
require.Contains(t, err.Error(), `volume has unrecognized type nothost`)
tg = &TaskGroup{
Volumes: map[string]*VolumeRequest{
"foo": {
Type: "host",
},
},
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{},
},
},
}
err = tg.Validate(&Job{})
require.Contains(t, err.Error(), `volume has an empty source`)
tg = &TaskGroup{
Name: "group-a",
Update: &UpdateStrategy{
Canary: 1,
},
Volumes: map[string]*VolumeRequest{
"foo": {
Type: "csi",
PerAlloc: true,
},
},
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{},
},
},
}
err = tg.Validate(&Job{})
require.Contains(t, err.Error(), `volume has an empty source`)
require.Contains(t, err.Error(), `volume cannot be per_alloc when canaries are in use`)
require.Contains(t, err.Error(), `CSI volumes must have an attachment mode`)
require.Contains(t, err.Error(), `CSI volumes must have an access mode`)
tg = &TaskGroup{
Volumes: map[string]*VolumeRequest{
"foo": {
Type: "host",
},
},
Tasks: []*Task{
{
Name: "task-a",
Resources: &Resources{},
VolumeMounts: []*VolumeMount{
{
Volume: "",
},
},
},
{
Name: "task-b",
Resources: &Resources{},
VolumeMounts: []*VolumeMount{
{
Volume: "foob",
},
},
},
},
}
err = tg.Validate(&Job{})
expected = `Task task-a has a volume mount (0) referencing an empty volume`
require.Contains(t, err.Error(), expected)
expected = `Task task-b has a volume mount (0) referencing undefined volume foob`
require.Contains(t, err.Error(), expected)
taskA := &Task{Name: "task-a"}
tg = &TaskGroup{
Name: "group-a",
Services: []*Service{
{
Name: "service-a",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "check-a",
Type: "tcp",
TaskName: "task-b",
PortLabel: "http",
Interval: time.Duration(1 * time.Second),
Timeout: time.Duration(1 * time.Second),
},
},
},
},
Tasks: []*Task{taskA},
}
err = tg.Validate(&Job{})
expected = `Check check-a invalid: refers to non-existent task task-b`
require.Contains(t, err.Error(), expected)
expected = `Check check-a invalid: only script and gRPC checks should have tasks`
require.Contains(t, err.Error(), expected)
tg = &TaskGroup{
Name: "group-a",
Services: []*Service{
{
Name: "service-a",
Provider: "nomad",
},
{
Name: "service-b",
Provider: "consul",
},
},
Tasks: []*Task{{Name: "task-a"}},
}
err = tg.Validate(&Job{})
expected = "Multiple service providers used: task group services must use the same provider"
require.Contains(t, err.Error(), expected)
tg = &TaskGroup{
Name: "group-a",
Services: []*Service{
{
Name: "service-a",
Provider: "nomad",
},
},
Tasks: []*Task{
{
Name: "task-a",
Services: []*Service{
{
Name: "service-b",
Provider: "consul",
},
},
},
},
}
err = tg.Validate(&Job{})
expected = "Multiple service providers used: task group services must use the same provider"
require.Contains(t, err.Error(), expected)
}
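// TestTaskGroupNetwork_Validate table-tests validateNetworks: port values and "to"
// targets must fit in 16 bits, static ports may only collide across distinct host
// networks, group and task ports must not collide, and bridge-mode hostnames must be
// valid DNS names.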
func TestTaskGroupNetwork_Validate(t *testing.T) {
ci.Parallel(t)
cases := []struct {
TG *TaskGroup
ErrContains string
}{
{
TG: &TaskGroup{
Name: "group-static-value-ok",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "ok",
Value: 65535,
},
},
},
},
},
},
{
TG: &TaskGroup{
Name: "group-dynamic-value-ok",
Networks: Networks{
&NetworkResource{
DynamicPorts: []Port{
{
Label: "ok",
Value: 65535,
},
},
},
},
},
},
{
TG: &TaskGroup{
Name: "group-static-to-ok",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "ok",
To: 65535,
},
},
},
},
},
},
{
TG: &TaskGroup{
Name: "group-dynamic-to-ok",
Networks: Networks{
&NetworkResource{
DynamicPorts: []Port{
{
Label: "ok",
To: 65535,
},
},
},
},
},
},
{
TG: &TaskGroup{
Name: "group-static-value-too-high",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "too-high",
Value: 65536,
},
},
},
},
},
ErrContains: "greater than",
},
{
TG: &TaskGroup{
Name: "group-dynamic-value-too-high",
Networks: Networks{
&NetworkResource{
DynamicPorts: []Port{
{
Label: "too-high",
Value: 65536,
},
},
},
},
},
ErrContains: "greater than",
},
{
TG: &TaskGroup{
Name: "group-static-to-too-high",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "too-high",
To: 65536,
},
},
},
},
},
ErrContains: "greater than",
},
{
TG: &TaskGroup{
Name: "group-dynamic-to-too-high",
Networks: Networks{
&NetworkResource{
DynamicPorts: []Port{
{
Label: "too-high",
To: 65536,
},
},
},
},
},
ErrContains: "greater than",
},
{
TG: &TaskGroup{
Name: "group-same-static-port-different-host_network",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "net1_http",
Value: 80,
HostNetwork: "net1",
},
{
Label: "net2_http",
Value: 80,
HostNetwork: "net2",
},
},
},
},
},
},
{
TG: &TaskGroup{
Name: "mixing-group-task-ports",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "group_http",
Value: 80,
},
},
},
},
Tasks: []*Task{
{
Name: "task1",
Resources: &Resources{
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "task_http",
Value: 80,
},
},
},
},
},
},
},
},
ErrContains: "already reserved by",
},
{
TG: &TaskGroup{
Name: "mixing-group-task-ports-with-host_network",
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "group_http",
Value: 80,
HostNetwork: "net1",
},
},
},
},
Tasks: []*Task{
{
Name: "task1",
Resources: &Resources{
Networks: Networks{
&NetworkResource{
ReservedPorts: []Port{
{
Label: "task_http",
Value: 80,
},
},
},
},
},
},
},
},
},
{
TG: &TaskGroup{
Tasks: []*Task{
{Driver: "docker"},
},
Networks: []*NetworkResource{
{
Mode: "bridge",
Hostname: "foobar",
},
},
},
},
{
TG: &TaskGroup{
Tasks: []*Task{
{Name: "hostname-invalid-dns-name"},
},
Networks: []*NetworkResource{
{
Mode: "bridge",
Hostname: "............",
},
},
},
ErrContains: "Hostname is not a valid DNS name",
},
}
for i := range cases {
tc := cases[i]
t.Run(tc.TG.Name, func(t *testing.T) {
err := tc.TG.validateNetworks()
t.Logf("%s -> %v", tc.TG.Name, err)
if tc.ErrContains == "" {
require.NoError(t, err)
return
}
require.Error(t, err)
require.Contains(t, err.Error(), tc.ErrContains)
})
}
}
func TestTask_Validate(t *testing.T) {
ci.Parallel(t)
task := &Task{}
ephemeralDisk := DefaultEphemeralDisk()
err := task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
requireErrors(t, err,
"task name",
"task driver",
"task resources",
)
task = &Task{Name: "web/foo"}
err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
require.Error(t, err, "slashes")
task = &Task{
Name: "web",
Driver: "docker",
Resources: &Resources{
CPU: 100,
MemoryMB: 100,
},
LogConfig: DefaultLogConfig(),
}
ephemeralDisk.SizeMB = 200
err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
if err != nil {
t.Fatalf("err: %s", err)
}
task.Constraints = append(task.Constraints,
&Constraint{
Operand: ConstraintDistinctHosts,
},
&Constraint{
Operand: ConstraintDistinctProperty,
LTarget: "${meta.rack}",
})
err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
requireErrors(t, err,
"task level: distinct_hosts",
"task level: distinct_property",
)
}
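// TestTask_Validate_Resources checks Resources.Validate against minimum, default, and
// fully populated resources, as well as CPU, memory, and memory max values that are too
// small.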
func TestTask_Validate_Resources(t *testing.T) {
ci.Parallel(t)
cases := []struct {
name string
res *Resources
err string
}{
{
name: "Minimum",
res: MinResources(),
},
{
name: "Default",
res: DefaultResources(),
},
{
name: "Full",
res: &Resources{
CPU: 1000,
MemoryMB: 1000,
MemoryMaxMB: 2000,
IOPS: 1000,
Networks: []*NetworkResource{
{
Mode: "host",
Device: "localhost",
CIDR: "127.0.0.0/8",
IP: "127.0.0.1",
MBits: 1000,
DNS: &DNSConfig{
Servers: []string{"localhost"},
Searches: []string{"localdomain"},
Options: []string{"ndots:5"},
},
ReservedPorts: []Port{
{
Label: "reserved",
Value: 1234,
To: 1234,
HostNetwork: "loopback",
},
},
DynamicPorts: []Port{
{
Label: "dynamic",
Value: 5678,
To: 5678,
HostNetwork: "loopback",
},
},
},
},
},
},
{
name: "too little cpu",
res: &Resources{
CPU: 0,
MemoryMB: 200,
},
err: "minimum CPU value is 1",
},
{
name: "too little memory",
res: &Resources{
CPU: 100,
MemoryMB: 1,
},
err: "minimum MemoryMB value is 10; got 1",
},
{
name: "too little memory max",
res: &Resources{
CPU: 100,
MemoryMB: 200,
MemoryMaxMB: 10,
},
err: "MemoryMaxMB value (10) should be larger than MemoryMB value (200",
},
}
for i := range cases {
tc := cases[i]
t.Run(tc.name, func(t *testing.T) {
err := tc.res.Validate()
if tc.err == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err)
}
})
}
}
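// TestNetworkResource_Copy verifies that NetworkResource.Copy handles a nil receiver and
// returns a deep copy whose DNS servers can be mutated without affecting the original.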
func TestNetworkResource_Copy(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
inputNetworkResource *NetworkResource
name string
}{
{
inputNetworkResource: nil,
name: "nil input check",
},
{
inputNetworkResource: &NetworkResource{
Mode: "bridge",
Device: "eth0",
CIDR: "10.0.0.1/8",
IP: "10.1.1.13",
Hostname: "foobar",
MBits: 1000,
DNS: &DNSConfig{
Servers: []string{"8.8.8.8", "8.8.4.4"},
Searches: []string{"example.com"},
Options: []string{"ndot:2"},
},
ReservedPorts: []Port{
{
Label: "foo",
Value: 1313,
To: 1313,
HostNetwork: "private",
},
},
DynamicPorts: []Port{
{
Label: "bar",
To: 1414,
HostNetwork: "public",
},
},
},
name: "fully populated input check",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
output := tc.inputNetworkResource.Copy()
assert.Equal(t, tc.inputNetworkResource, output, tc.name)
if output == nil {
return
}
// Assert changes to the copy aren't propagated to the
// original
output.DNS.Servers[1] = "foo"
assert.NotEqual(t, tc.inputNetworkResource, output, tc.name)
})
}
}
func TestTask_Validate_Services(t *testing.T) {
ci.Parallel(t)
s1 := &Service{
Name: "service-name",
Provider: "consul",
PortLabel: "bar",
Checks: []*ServiceCheck{
{
Name: "check-name",
Type: ServiceCheckTCP,
Interval: 0 * time.Second,
},
{
Name: "check-name",
Type: ServiceCheckTCP,
Timeout: 2 * time.Second,
},
{
Name: "check-name",
Type: ServiceCheckTCP,
Interval: 1 * time.Second,
},
},
}
s2 := &Service{
Name: "service-name",
Provider: "consul",
PortLabel: "bar",
}
s3 := &Service{
Name: "service-A",
Provider: "consul",
PortLabel: "a",
}
s4 := &Service{
Name: "service-A",
Provider: "consul",
PortLabel: "b",
}
ephemeralDisk := DefaultEphemeralDisk()
ephemeralDisk.SizeMB = 200
task := &Task{
Name: "web",
Driver: "docker",
Resources: &Resources{
CPU: 100,
MemoryMB: 100,
},
Services: []*Service{s1, s2},
}
task1 := &Task{
Name: "web",
Driver: "docker",
Resources: DefaultResources(),
Services: []*Service{s3, s4},
LogConfig: DefaultLogConfig(),
}
tgNetworks := []*NetworkResource{
{
MBits: 10,
DynamicPorts: []Port{
{
Label: "a",
Value: 1000,
},
{
Label: "b",
Value: 2000,
},
},
},
}
err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks)
if err == nil {
t.Fatal("expected an error")
}
if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
t.Fatalf("err: %v", err)
}
if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
t.Fatalf("err: %v", err)
}
if !strings.Contains(err.Error(), "missing required value interval") {
t.Fatalf("err: %v", err)
}
if !strings.Contains(err.Error(), "cannot be less than") {
t.Fatalf("err: %v", err)
}
if err = task1.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil {
t.Fatalf("err : %v", err)
}
}
func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
ci.Parallel(t)
ephemeralDisk := DefaultEphemeralDisk()
getTask := func(s *Service) *Task {
task := &Task{
Name: "web",
Driver: "docker",
Resources: DefaultResources(),
Services: []*Service{s},
LogConfig: DefaultLogConfig(),
}
return task
}
tgNetworks := []*NetworkResource{
{
DynamicPorts: []Port{
{
Label: "http",
Value: 80,
},
},
},
}
cases := []*Service{
{
// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
Name: "DriverModeWithLabel",
Provider: "consul",
PortLabel: "http",
AddressMode: AddressModeDriver,
},
{
Name: "DriverModeWithPort",
Provider: "consul",
PortLabel: "80",
AddressMode: AddressModeDriver,
},
{
Name: "HostModeWithLabel",
Provider: "consul",
PortLabel: "http",
AddressMode: AddressModeHost,
},
{
Name: "HostModeWithoutLabel",
Provider: "consul",
AddressMode: AddressModeHost,
},
{
Name: "DriverModeWithoutLabel",
Provider: "consul",
AddressMode: AddressModeDriver,
},
}
for _, service := range cases {
task := getTask(service)
t.Run(service.Name, func(t *testing.T) {
if err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil {
t.Fatalf("unexpected err: %v", err)
}
})
}
}
func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
ci.Parallel(t)
ephemeralDisk := DefaultEphemeralDisk()
getTask := func(s *Service) *Task {
return &Task{
Name: "web",
Driver: "docker",
Resources: DefaultResources(),
Services: []*Service{s},
LogConfig: DefaultLogConfig(),
}
}
tgNetworks := []*NetworkResource{
{
DynamicPorts: []Port{
{
Label: "http",
Value: 80,
},
},
},
}
cases := []*Service{
{
// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
Name: "DriverModeWithLabel",
PortLabel: "asdf",
AddressMode: AddressModeDriver,
},
{
Name: "HostModeWithLabel",
PortLabel: "asdf",
AddressMode: AddressModeHost,
},
{
Name: "HostModeWithPort",
PortLabel: "80",
AddressMode: AddressModeHost,
},
}
for _, service := range cases {
task := getTask(service)
t.Run(service.Name, func(t *testing.T) {
err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks)
if err == nil {
t.Fatalf("expected an error")
}
//t.Logf("err: %v", err)
})
}
}
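// TestTask_Validate_Service_Check exercises ServiceCheck.validate: timeouts, initial
// check states, HTTP paths, and the restriction of expose to HTTP and gRPC checks.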
func TestTask_Validate_Service_Check(t *testing.T) {
ci.Parallel(t)
invalidCheck := ServiceCheck{
Name: "check-name",
Command: "/bin/true",
Type: ServiceCheckScript,
Interval: 10 * time.Second,
}
err := invalidCheck.validate()
if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
t.Fatalf("expected a timeout validation error but received: %q", err)
}
check1 := ServiceCheck{
Name: "check-name",
Type: ServiceCheckTCP,
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
}
if err := check1.validate(); err != nil {
t.Fatalf("err: %v", err)
}
check1.InitialStatus = "foo"
err = check1.validate()
if err == nil {
t.Fatal("Expected an error")
}
if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
t.Fatalf("err: %v", err)
}
check1.InitialStatus = api.HealthCritical
err = check1.validate()
if err != nil {
t.Fatalf("err: %v", err)
}
check1.InitialStatus = api.HealthPassing
err = check1.validate()
if err != nil {
t.Fatalf("err: %v", err)
}
check1.InitialStatus = ""
err = check1.validate()
if err != nil {
t.Fatalf("err: %v", err)
}
check2 := ServiceCheck{
Name: "check-name-2",
Type: ServiceCheckHTTP,
Interval: 10 * time.Second,
Timeout: 2 * time.Second,
Path: "/foo/bar",
}
err = check2.validate()
if err != nil {
t.Fatalf("err: %v", err)
}
check2.Path = ""
err = check2.validate()
if err == nil {
t.Fatal("Expected an error")
}
if !strings.Contains(err.Error(), "valid http path") {
t.Fatalf("err: %v", err)
}
check2.Path = "http://www.example.com"
err = check2.validate()
if err == nil {
t.Fatal("Expected an error")
}
if !strings.Contains(err.Error(), "relative http path") {
t.Fatalf("err: %v", err)
}
t.Run("check expose", func(t *testing.T) {
t.Run("type http", func(t *testing.T) {
require.NoError(t, (&ServiceCheck{
Type: ServiceCheckHTTP,
Interval: 1 * time.Second,
Timeout: 1 * time.Second,
Path: "/health",
Expose: true,
}).validate())
})
t.Run("type tcp", func(t *testing.T) {
require.EqualError(t, (&ServiceCheck{
Type: ServiceCheckTCP,
Interval: 1 * time.Second,
Timeout: 1 * time.Second,
Expose: true,
}).validate(), "expose may only be set on HTTP or gRPC checks")
})
})
}
// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
ci.Parallel(t)
getTask := func(s *Service) (*Task, *TaskGroup) {
return &Task{
Services: []*Service{s},
}, &TaskGroup{
Networks: []*NetworkResource{
{
DynamicPorts: []Port{
{
Label: "http",
Value: 9999,
},
},
},
},
}
}
cases := []struct {
Service *Service
ErrContains string
}{
{
Service: &Service{
Name: "invalid-driver",
Provider: "consul",
PortLabel: "80",
AddressMode: "host",
},
ErrContains: `port label "80" referenced`,
},
{
Service: &Service{
Name: "http-driver-fail-1",
PortLabel: "80",
AddressMode: "driver",
Checks: []*ServiceCheck{
{
Name: "invalid-check-1",
Type: "tcp",
Interval: time.Second,
Timeout: time.Second,
},
},
},
ErrContains: `check "invalid-check-1" cannot use a numeric port`,
},
{
Service: &Service{
Name: "http-driver-fail-2",
Provider: "consul",
PortLabel: "80",
AddressMode: "driver",
Checks: []*ServiceCheck{
{
Name: "invalid-check-2",
Type: "tcp",
PortLabel: "80",
Interval: time.Second,
Timeout: time.Second,
},
},
},
ErrContains: `check "invalid-check-2" cannot use a numeric port`,
},
{
Service: &Service{
Name: "http-driver-fail-3",
Provider: "consul",
PortLabel: "80",
AddressMode: "driver",
Checks: []*ServiceCheck{
{
Name: "invalid-check-3",
Type: "tcp",
PortLabel: "missing-port-label",
Interval: time.Second,
Timeout: time.Second,
},
},
},
ErrContains: `port label "missing-port-label" referenced`,
},
{
Service: &Service{
Name: "http-driver-passes",
Provider: "consul",
PortLabel: "80",
AddressMode: "driver",
Checks: []*ServiceCheck{
{
Name: "valid-script-check",
Type: "script",
Command: "ok",
Interval: time.Second,
Timeout: time.Second,
},
{
Name: "valid-host-check",
Type: "tcp",
PortLabel: "http",
Interval: time.Second,
Timeout: time.Second,
},
{
Name: "valid-driver-check",
Type: "tcp",
AddressMode: "driver",
Interval: time.Second,
Timeout: time.Second,
},
},
},
},
{
Service: &Service{
Name: "empty-address-3673-passes-1",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "valid-port-label",
Type: "tcp",
PortLabel: "http",
Interval: time.Second,
Timeout: time.Second,
},
{
Name: "empty-is-ok",
Type: "script",
Command: "ok",
Interval: time.Second,
Timeout: time.Second,
},
},
},
},
{
Service: &Service{
Name: "empty-address-3673-passes-2",
},
},
{
Service: &Service{
Name: "empty-address-3673-fails",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "empty-is-not-ok",
Type: "tcp",
Interval: time.Second,
Timeout: time.Second,
},
},
},
ErrContains: `invalid: check requires a port but neither check nor service`,
},
{
Service: &Service{
Name: "conect-block-on-task-level",
Connect: &ConsulConnect{SidecarService: &ConsulSidecarService{}},
},
ErrContains: `cannot have "connect" block`,
},
}
for _, tc := range cases {
tc := tc
task, tg := getTask(tc.Service)
t.Run(tc.Service.Name, func(t *testing.T) {
err := validateServices(task, tg.Networks)
if err == nil && tc.ErrContains == "" {
// Ok!
return
}
if err == nil {
t.Fatalf("no error returned. expected: %s", tc.ErrContains)
}
if !strings.Contains(err.Error(), tc.ErrContains) {
t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
}
})
}
}
func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
ci.Parallel(t)
// Bad (no port)
invalidGRPC := &ServiceCheck{
Type: ServiceCheckGRPC,
Interval: time.Second,
Timeout: time.Second,
}
service := &Service{
Name: "test",
Provider: "consul",
Checks: []*ServiceCheck{invalidGRPC},
}
assert.Error(t, service.Validate())
// Good
service.Checks[0] = &ServiceCheck{
Type: ServiceCheckGRPC,
Interval: time.Second,
Timeout: time.Second,
PortLabel: "some-port-label",
}
assert.NoError(t, service.Validate())
}
func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
ci.Parallel(t)
invalidCheckRestart := &CheckRestart{
Limit: -1,
Grace: -1,
}
err := invalidCheckRestart.Validate()
assert.NotNil(t, err, "invalidateCheckRestart.Validate()")
assert.Len(t, err.(*multierror.Error).Errors, 2)
validCheckRestart := &CheckRestart{}
assert.Nil(t, validCheckRestart.Validate())
validCheckRestart.Limit = 1
validCheckRestart.Grace = 1
assert.Nil(t, validCheckRestart.Validate())
}
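// TestTask_Validate_ConnectProxyKind table-tests validation of connect-proxy task kinds:
// proxy tasks must not define services or be leaders, and must reference a
// Connect-enabled service in their task group.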
func TestTask_Validate_ConnectProxyKind(t *testing.T) {
ci.Parallel(t)
ephemeralDisk := DefaultEphemeralDisk()
getTask := func(kind TaskKind, leader bool) *Task {
task := &Task{
Name: "web",
Driver: "docker",
Resources: DefaultResources(),
LogConfig: DefaultLogConfig(),
Kind: kind,
Leader: leader,
}
task.Resources.Networks = []*NetworkResource{
{
MBits: 10,
DynamicPorts: []Port{
{
Label: "http",
Value: 80,
},
},
},
}
return task
}
cases := []struct {
Desc string
Kind TaskKind
Leader bool
Service *Service
TgService []*Service
ErrContains string
}{
{
Desc: "Not connect",
Kind: "test",
},
{
Desc: "Invalid because of service in task definition",
Kind: "connect-proxy:redis",
Service: &Service{
Name: "redis",
},
ErrContains: "Connect proxy task must not have a service stanza",
},
{
Desc: "Leader should not be set",
Kind: "connect-proxy:redis",
Leader: true,
Service: &Service{
Name: "redis",
},
ErrContains: "Connect proxy task must not have leader set",
},
{
Desc: "Service name invalid",
Kind: "connect-proxy:redis:test",
Service: &Service{
Name: "redis",
},
ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`,
},
{
Desc: "Service name not found in group",
Kind: "connect-proxy:redis",
ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
},
{
Desc: "Connect stanza not configured in group",
Kind: "connect-proxy:redis",
TgService: []*Service{{
Name: "redis",
}},
ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
},
{
Desc: "Valid connect proxy kind",
Kind: "connect-proxy:redis",
TgService: []*Service{{
Name: "redis",
Connect: &ConsulConnect{
SidecarService: &ConsulSidecarService{
Port: "db",
},
},
}},
},
}
for _, tc := range cases {
tc := tc
task := getTask(tc.Kind, tc.Leader)
if tc.Service != nil {
task.Services = []*Service{tc.Service}
}
t.Run(tc.Desc, func(t *testing.T) {
err := task.Validate(ephemeralDisk, "service", tc.TgService, nil)
if err == nil && tc.ErrContains == "" {
// Ok!
return
}
require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains)
require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err)
})
}
}
func TestTask_Validate_LogConfig(t *testing.T) {
ci.Parallel(t)
task := &Task{
LogConfig: DefaultLogConfig(),
}
ephemeralDisk := &EphemeralDisk{
SizeMB: 1,
}
err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)
require.Error(t, err, "log storage")
}
func TestLogConfig_Equals(t *testing.T) {
ci.Parallel(t)
t.Run("both nil", func(t *testing.T) {
a := (*LogConfig)(nil)
b := (*LogConfig)(nil)
require.True(t, a.Equals(b))
})
t.Run("one nil", func(t *testing.T) {
a := new(LogConfig)
b := (*LogConfig)(nil)
require.False(t, a.Equals(b))
})
t.Run("max files", func(t *testing.T) {
a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
b := &LogConfig{MaxFiles: 2, MaxFileSizeMB: 200}
require.False(t, a.Equals(b))
})
t.Run("max file size", func(t *testing.T) {
a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 100}
b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
require.False(t, a.Equals(b))
})
t.Run("same", func(t *testing.T) {
a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
require.True(t, a.Equals(b))
})
}
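// TestTask_Validate_CSIPluginConfig checks that a csi_plugin block requires a non-empty
// plugin ID and a valid plugin type.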
func TestTask_Validate_CSIPluginConfig(t *testing.T) {
ci.Parallel(t)
table := []struct {
name string
pc *TaskCSIPluginConfig
expectedErr string
unexpectedErr string
}{
{
name: "no errors when not specified",
pc: nil,
unexpectedErr: "CSIPluginConfig",
},
{
name: "requires non-empty plugin id",
pc: &TaskCSIPluginConfig{},
expectedErr: "CSIPluginConfig must have a non-empty PluginID",
},
{
name: "requires valid plugin type",
pc: &TaskCSIPluginConfig{
ID: "com.hashicorp.csi",
Type: "nonsense",
},
expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"",
},
}
for _, tt := range table {
t.Run(tt.name, func(t *testing.T) {
task := testJob().TaskGroups[0].Tasks[0]
task.CSIPluginConfig = tt.pc
ephemeralDisk := &EphemeralDisk{
SizeMB: 100,
}
err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)

if tt.expectedErr != "" {
require.Error(t, err)
require.Contains(t, err.Error(), tt.expectedErr)
} else {
require.NoError(t, err)
}
})
}
}
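// The sketch below is an illustrative addition (not part of the upstream
// suite): it builds the smallest TaskCSIPluginConfig the table above accepts,
// reusing the plugin ID from the table and the "monolith" type named in the
// asserted error message.
func ExampleTaskCSIPluginConfig() {
	cfg := &TaskCSIPluginConfig{
		ID:   "com.hashicorp.csi", // same illustrative ID as the table above
		Type: "monolith",          // one of 'node', 'controller', or 'monolith'
	}
	fmt.Println(cfg.ID, cfg.Type)
}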

func TestTask_Validate_Template(t *testing.T) {
ci.Parallel(t)

bad := &Template{}
task := &Task{
Templates: []*Template{bad},
}
ephemeralDisk := &EphemeralDisk{
SizeMB: 1,
}
err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)
if !strings.Contains(err.Error(), "Template 1 validation failed") {
t.Fatalf("err: %s", err)
}
// Have two templates that share the same destination
good := &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
}
task.Templates = []*Template{good, good}
err = task.Validate(ephemeralDisk, JobTypeService, nil, nil)
if !strings.Contains(err.Error(), "same destination as") {
t.Fatalf("err: %s", err)
}
// Env templates can't use signals
task.Templates = []*Template{
{
Envvars: true,
ChangeMode: "signal",
},
}
err = task.Validate(ephemeralDisk, JobTypeService, nil, nil)
if err == nil {
t.Fatalf("expected error from Template.Validate")
}
if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
t.Errorf("expected to find %q but found %v", expected, err)
}
}

func TestTemplate_Validate(t *testing.T) {
ci.Parallel(t)

cases := []struct {
Tmpl *Template
Fail bool
ContainsErrs []string
}{
{
Tmpl: &Template{},
Fail: true,
ContainsErrs: []string{
"specify a source path",
"specify a destination",
TemplateChangeModeInvalidError.Error(),
},
},
{
Tmpl: &Template{
Splay: -100,
},
Fail: true,
ContainsErrs: []string{
"positive splay",
},
},
{
Tmpl: &Template{
ChangeMode: "foo",
},
Fail: true,
ContainsErrs: []string{
TemplateChangeModeInvalidError.Error(),
},
},
{
Tmpl: &Template{
ChangeMode: "signal",
},
Fail: true,
ContainsErrs: []string{
"specify signal value",
},
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "../../root",
ChangeMode: "noop",
},
Fail: true,
ContainsErrs: []string{
"destination escapes",
},
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
},
Fail: false,
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
Perms: "0444",
},
Fail: false,
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
Perms: "zza",
},
Fail: true,
ContainsErrs: []string{
"as octal",
},
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
Wait: &WaitConfig{
Min: helper.TimeToPtr(10 * time.Second),
Max: helper.TimeToPtr(5 * time.Second),
},
},
Fail: true,
ContainsErrs: []string{
"greater than",
},
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
Wait: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
Max: helper.TimeToPtr(5 * time.Second),
},
},
Fail: false,
},
{
Tmpl: &Template{
SourcePath: "foo",
DestPath: "local/foo",
ChangeMode: "noop",
Wait: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
Max: helper.TimeToPtr(10 * time.Second),
},
},
Fail: false,
},
}
for i, c := range cases {
err := c.Tmpl.Validate()
if err != nil {
if !c.Fail {
t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
}
e := err.Error()
for _, exp := range c.ContainsErrs {
if !strings.Contains(e, exp) {
t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e)
}
}
} else if c.Fail {
t.Fatalf("Case %d: should have failed: %v", i+1, err)
}
}
}
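// A minimal usage sketch (added for illustration): the smallest Template the
// cases above accept has a source, an in-sandbox destination, and an explicit
// "noop" change mode, mirroring the passing table entries.
func ExampleTemplate_Validate() {
	tmpl := &Template{
		SourcePath: "foo",
		DestPath:   "local/foo",
		ChangeMode: "noop",
	}
	fmt.Println(tmpl.Validate() == nil) // expected to print true
}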
func TestTaskWaitConfig_Equals(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
name string
config *WaitConfig
expected *WaitConfig
}{
{
name: "all-fields",
config: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
Max: helper.TimeToPtr(10 * time.Second),
},
expected: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
Max: helper.TimeToPtr(10 * time.Second),
},
},
{
name: "no-fields",
config: &WaitConfig{},
expected: &WaitConfig{},
},
{
name: "min-only",
config: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
},
expected: &WaitConfig{
Min: helper.TimeToPtr(5 * time.Second),
},
},
{
name: "max-only",
config: &WaitConfig{
Max: helper.TimeToPtr(10 * time.Second),
},
expected: &WaitConfig{
Max: helper.TimeToPtr(10 * time.Second),
},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
require.True(t, tc.config.Equals(tc.expected))
})
}
}
func TestConstraint_Validate(t *testing.T) {
ci.Parallel(t)
c := &Constraint{}
err := c.Validate()
require.Error(t, err, "Missing constraint operand")
c = &Constraint{
LTarget: "$attr.kernel.name",
RTarget: "linux",
Operand: "=",
}
err = c.Validate()
require.NoError(t, err)
// Perform additional regexp validation
c.Operand = ConstraintRegex
c.RTarget = "(foo"
err = c.Validate()
require.Error(t, err, "missing closing")
// Perform version validation
c.Operand = ConstraintVersion
c.RTarget = "~> foo"
err = c.Validate()
require.Error(t, err, "Malformed constraint")
// Perform semver validation
c.Operand = ConstraintSemver
err = c.Validate()
require.Error(t, err, "Malformed constraint")
c.RTarget = ">= 0.6.1"
require.NoError(t, c.Validate())
// Perform distinct_property validation
c.Operand = ConstraintDistinctProperty
c.RTarget = "0"
err = c.Validate()
require.Error(t, err, "count of 1 or greater")
c.RTarget = "-1"
err = c.Validate()
require.Error(t, err, "to uint64")
// Perform distinct_hosts validation
c.Operand = ConstraintDistinctHosts
c.LTarget = ""
c.RTarget = ""
if err := c.Validate(); err != nil {
t.Fatalf("expected valid constraint: %v", err)
}

// Perform set_contains* validation
c.RTarget = ""

for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
c.Operand = o
err = c.Validate()
require.Error(t, err, "requires an RTarget")
}
// Perform LTarget validation
c.Operand = ConstraintRegex
c.RTarget = "foo"
c.LTarget = ""
err = c.Validate()
require.Error(t, err, "No LTarget")
// Perform constraint type validation
c.Operand = "foo"
err = c.Validate()
require.Error(t, err, "Unknown constraint type")
}
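// An illustrative sketch of a constraint that passes the semver validation
// exercised above; the LTarget attribute is an arbitrary example, while the
// operand and RTarget mirror the valid case in the test.
func ExampleConstraint_Validate() {
	c := &Constraint{
		LTarget: "${attr.nomad.version}", // illustrative attribute name
		RTarget: ">= 0.6.1",
		Operand: ConstraintSemver,
	}
	fmt.Println(c.Validate() == nil) // expected to print true
}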

func TestAffinity_Validate(t *testing.T) {
ci.Parallel(t)

type tc struct {
affinity *Affinity
err error
name string
}
testCases := []tc{
{
affinity: &Affinity{},
err: fmt.Errorf("Missing affinity operand"),
},
{
affinity: &Affinity{
Operand: "foo",
LTarget: "${meta.node_class}",
Weight: 10,
},
err: fmt.Errorf("Unknown affinity operator \"foo\""),
},
{
affinity: &Affinity{
Operand: "=",
LTarget: "${meta.node_class}",
Weight: 10,
},
err: fmt.Errorf("Operator \"=\" requires an RTarget"),
},
{
affinity: &Affinity{
Operand: "=",
LTarget: "${meta.node_class}",
RTarget: "c4",
Weight: 0,
},
err: fmt.Errorf("Affinity weight cannot be zero"),
},
{
affinity: &Affinity{
Operand: "=",
LTarget: "${meta.node_class}",
RTarget: "c4",
Weight: 110,
},
err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
},
{
affinity: &Affinity{
Operand: "=",
LTarget: "${node.class}",
Weight: 10,
},
err: fmt.Errorf("Operator \"=\" requires an RTarget"),
},
{
affinity: &Affinity{
Operand: "version",
LTarget: "${meta.os}",
RTarget: ">>2.0",
Weight: 110,
},
err: fmt.Errorf("Version affinity is invalid"),
},
{
affinity: &Affinity{
Operand: "regexp",
LTarget: "${meta.os}",
RTarget: "\\K2.0",
Weight: 100,
},
err: fmt.Errorf("Regular expression failed to compile"),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.affinity.Validate()
if tc.err != nil {
require.NotNil(t, err)
require.Contains(t, err.Error(), tc.err.Error())
} else {
require.Nil(t, err)
}
})
}
}
func TestUpdateStrategy_Validate(t *testing.T) {
ci.Parallel(t)
u := &UpdateStrategy{
MaxParallel: -1,
HealthCheck: "foo",
MinHealthyTime: -10,
HealthyDeadline: -15,
ProgressDeadline: -25,
AutoRevert: false,
Canary: -1,
}
err := u.Validate()
requireErrors(t, err,
"Invalid health check given",
"Max parallel can not be less than zero",
"Canary count can not be less than zero",
"Minimum healthy time may not be less than zero",
"Healthy deadline must be greater than zero",
"Progress deadline must be zero or greater",
"Minimum healthy time must be less than healthy deadline",
"Healthy deadline must be less than progress deadline",
)
}
func TestResource_NetIndex(t *testing.T) {
ci.Parallel(t)
r := &Resources{
Networks: []*NetworkResource{
{Device: "eth0"},
{Device: "lo0"},
{Device: ""},
},
}

if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
t.Fatalf("Bad: %d", idx)
}
if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
t.Fatalf("Bad: %d", idx)
}
if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
t.Fatalf("Bad: %d", idx)
}
}

func TestResource_Add(t *testing.T) {
ci.Parallel(t)

r1 := &Resources{
CPU: 2000,
MemoryMB: 2048,
DiskMB: 10000,
Networks: []*NetworkResource{
{
CIDR: "10.0.0.0/8",
MBits: 100,
ReservedPorts: []Port{{"ssh", 22, 0, ""}},
},
},
}
r2 := &Resources{
CPU: 2000,
MemoryMB: 1024,
DiskMB: 5000,
Networks: []*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
},
}
r1.Add(r2)

expect := &Resources{
CPU: 3000,
MemoryMB: 3072,
DiskMB: 15000,
Networks: []*NetworkResource{
{
CIDR: "10.0.0.0/8",
MBits: 150,
ReservedPorts: []Port{{"ssh", 22, 0, ""}, {"web", 80, 0, ""}},
},
},
}
if !reflect.DeepEqual(expect.Networks, r1.Networks) {
t.Fatalf("bad: %#v %#v", expect, r1)
}
}
func TestResource_Add_Network(t *testing.T) {
ci.Parallel(t)
r1 := &Resources{}
r2 := &Resources{
Networks: []*NetworkResource{
{
MBits: 50,
DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}},
},
},
}
r3 := &Resources{
Networks: []*NetworkResource{
{
MBits: 25,
DynamicPorts: []Port{{"admin", 0, 8080, ""}},
},
},
}
r1.Add(r2)
r1.Add(r3)
expect := &Resources{
Networks: []*NetworkResource{
{
MBits: 75,
DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}, {"admin", 0, 8080, ""}},
},
},
}
if !reflect.DeepEqual(expect.Networks, r1.Networks) {
t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
}
}
func TestComparableResources_Subtract(t *testing.T) {
ci.Parallel(t)
r1 := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 2000,
ReservedCores: []uint16{0, 1},
},
Memory: AllocatedMemoryResources{
MemoryMB: 2048,
MemoryMaxMB: 3048,
},
Networks: []*NetworkResource{
{
CIDR: "10.0.0.0/8",
MBits: 100,
ReservedPorts: []Port{{"ssh", 22, 0, ""}},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 10000,
},
}
r2 := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 1000,
ReservedCores: []uint16{0},
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
MemoryMaxMB: 1524,
},
Networks: []*NetworkResource{
{
CIDR: "10.0.0.0/8",
MBits: 20,
ReservedPorts: []Port{{"ssh", 22, 0, ""}},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 5000,
},
}
r1.Subtract(r2)
expect := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 1000,
ReservedCores: []uint16{1},
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
MemoryMaxMB: 1524,
},
Networks: []*NetworkResource{
{
CIDR: "10.0.0.0/8",
MBits: 100,
ReservedPorts: []Port{{"ssh", 22, 0, ""}},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 5000,
},
}
require := require.New(t)
require.Equal(expect, r1)
}
func TestMemoryResources_Add(t *testing.T) {
ci.Parallel(t)
r := &AllocatedMemoryResources{}
// Adding a delta with no explicit max: MemoryMaxMB falls back to MemoryMB.
r.Add(&AllocatedMemoryResources{
MemoryMB: 100,
})
require.Equal(t, &AllocatedMemoryResources{
MemoryMB: 100,
MemoryMaxMB: 100,
}, r)
// Adding a delta with an explicit max.
r.Add(&AllocatedMemoryResources{
MemoryMB: 100,
MemoryMaxMB: 200,
})
require.Equal(t, &AllocatedMemoryResources{
MemoryMB: 200,
MemoryMaxMB: 300,
}, r)
}
func TestNodeNetworkResource_Copy(t *testing.T) {
ci.Parallel(t)
netResource := &NodeNetworkResource{
Mode: "host",
Device: "eth0",
MacAddress: "00:00:00:00:00:00",
Speed: 1000,
Addresses: []NodeNetworkAddress{
{
Family: NodeNetworkAF_IPv4,
Alias: "default",
Address: "192.168.0.2",
ReservedPorts: "22",
Gateway: "192.168.0.1",
},
},
}
// Copy must be equal.
netResourceCopy := netResource.Copy()
require.Equal(t, netResource, netResourceCopy)
// Modifying copy should not modify original value.
netResourceCopy.Mode = "alloc"
netResourceCopy.Device = "eth1"
netResourceCopy.MacAddress = "11:11:11:11:11:11"
netResourceCopy.Speed = 500
netResourceCopy.Addresses[0].Alias = "copy"
require.NotEqual(t, netResource, netResourceCopy)
}
func TestEncodeDecode(t *testing.T) {
ci.Parallel(t)

type FooRequest struct {
Foo string
Bar int
Baz bool
}
arg := &FooRequest{
Foo: "test",
Bar: 42,
Baz: true,
}
buf, err := Encode(1, arg)
if err != nil {
t.Fatalf("err: %v", err)
}
var out FooRequest
err = Decode(buf[1:], &out)
if err != nil {
t.Fatalf("err: %v", err)
}
if !reflect.DeepEqual(arg, &out) {
t.Fatalf("bad: %#v %#v", arg, out)
}
}
func BenchmarkEncodeDecode(b *testing.B) {
job := testJob()
for i := 0; i < b.N; i++ {
buf, err := Encode(1, job)
if err != nil {
b.Fatalf("err: %v", err)
}
var out Job
err = Decode(buf[1:], &out)
if err != nil {
b.Fatalf("err: %v", err)
}
}
}
func TestInvalidServiceCheck(t *testing.T) {
ci.Parallel(t)

s := Service{
Name: "service-name",
Provider: "consul",
PortLabel: "bar",
Checks: []*ServiceCheck{
{
Name: "check-name",
Type: "lol",
},
},
}
if err := s.Validate(); err == nil {
t.Fatalf("Service should be invalid (invalid type)")
}
s = Service{
Name: "service.name",
Provider: "consul",
PortLabel: "bar",
}
if err := s.ValidateName(s.Name); err == nil {
t.Fatalf("Service should be invalid (contains a dot): %v", err)
}

s = Service{
Name: "-my-service",
Provider: "consul",
PortLabel: "bar",
}
if err := s.Validate(); err == nil {
t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
}
s = Service{
Name: "my-service-${NOMAD_META_FOO}",
Provider: "consul",
PortLabel: "bar",
}
if err := s.Validate(); err != nil {
t.Fatalf("Service should be valid: %v", err)
}
s = Service{
Name: "my_service-${NOMAD_META_FOO}",
Provider: "consul",
PortLabel: "bar",
}
if err := s.Validate(); err == nil {
t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
}
s = Service{
Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
Provider: "consul",
PortLabel: "bar",
}
if err := s.ValidateName(s.Name); err == nil {
t.Fatalf("Service should be invalid (too long): %v", err)
}

s = Service{
Name: "service-name",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "check-tcp",
Type: ServiceCheckTCP,
Interval: 5 * time.Second,
Timeout: 2 * time.Second,
},
{
Name: "check-http",
Type: ServiceCheckHTTP,
Path: "/foo",
Interval: 5 * time.Second,
Timeout: 2 * time.Second,
},
},
}
if err := s.Validate(); err == nil {
t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
}
s = Service{
Name: "service-name",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "check-script",
Type: ServiceCheckScript,
Command: "/bin/date",
Interval: 5 * time.Second,
Timeout: 2 * time.Second,
},
},
}
if err := s.Validate(); err != nil {
t.Fatalf("un-expected error: %v", err)
}
s = Service{
Name: "service-name",
Provider: "consul",
Checks: []*ServiceCheck{
{
Name: "tcp-check",
Type: ServiceCheckTCP,
Interval: 5 * time.Second,
Timeout: 2 * time.Second,
},
},
Connect: &ConsulConnect{
SidecarService: &ConsulSidecarService{},
},
}
require.Error(t, s.Validate())
}

func TestDistinctCheckID(t *testing.T) {
ci.Parallel(t)

c1 := ServiceCheck{
Name: "web-health",
Type: "http",
Path: "/health",
Interval: 2 * time.Second,
Timeout: 3 * time.Second,
}
c2 := ServiceCheck{
Name: "web-health",
Type: "http",
Path: "/health1",
Interval: 2 * time.Second,
Timeout: 3 * time.Second,
}
c3 := ServiceCheck{
Name: "web-health",
Type: "http",
Path: "/health",
Interval: 4 * time.Second,
Timeout: 3 * time.Second,
}
serviceID := "123"
c1Hash := c1.Hash(serviceID)
c2Hash := c2.Hash(serviceID)
c3Hash := c3.Hash(serviceID)
if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
}
}
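// Illustrative sketch only: the check hash is keyed on the parent service ID
// as well as the check fields, so the identical check registered under two
// services is assumed to yield two distinct IDs.
func ExampleServiceCheck_Hash() {
	c := ServiceCheck{Name: "web-health", Type: "http", Path: "/health"}
	fmt.Println(c.Hash("service-a") != c.Hash("service-b")) // expected to print true
}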
func TestService_Canonicalize(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
inputService *Service
inputJob string
inputTaskGroup string
inputTask string
inputJobNamespace string
expectedOutputService *Service
name string
}{
{
inputService: &Service{
Name: "${TASK}-db",
},
inputJob: "example",
inputTaskGroup: "cache",
inputTask: "redis",
inputJobNamespace: "platform",
expectedOutputService: &Service{
Name: "redis-db",
Provider: "consul",
Namespace: "default",
},
name: "interpolate task in name",
},
{
inputService: &Service{
Name: "db",
},
inputJob: "example",
inputTaskGroup: "cache",
inputTask: "redis",
inputJobNamespace: "platform",
expectedOutputService: &Service{
Name: "db",
Provider: "consul",
Namespace: "default",
},
name: "no interpolation in name",
},
{
inputService: &Service{
Name: "${JOB}-${TASKGROUP}-${TASK}-db",
},
inputJob: "example",
inputTaskGroup: "cache",
inputTask: "redis",
inputJobNamespace: "platform",
expectedOutputService: &Service{
Name: "example-cache-redis-db",
Provider: "consul",
Namespace: "default",
},
name: "interpolate job, taskgroup and task in name",
},
{
inputService: &Service{
Name: "${BASE}-db",
},
inputJob: "example",
inputTaskGroup: "cache",
inputTask: "redis",
inputJobNamespace: "platform",
expectedOutputService: &Service{
Name: "example-cache-redis-db",
Provider: "consul",
Namespace: "default",
},
name: "interpolate base in name",
},
{
inputService: &Service{
Name: "db",
Provider: "nomad",
},
inputJob: "example",
inputTaskGroup: "cache",
inputTask: "redis",
inputJobNamespace: "platform",
expectedOutputService: &Service{
Name: "db",
Provider: "nomad",
Namespace: "platform",
},
name: "nomad provider",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.inputService.Canonicalize(tc.inputJob, tc.inputTaskGroup, tc.inputTask, tc.inputJobNamespace)
assert.Equal(t, tc.expectedOutputService, tc.inputService)
})
}
}
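// A usage sketch mirroring the table above: with the default consul provider,
// ${BASE} expands to "${JOB}-${TASKGROUP}-${TASK}", so this name canonicalizes
// to "example-cache-redis-db" and the namespace falls back to "default".
func ExampleService_Canonicalize() {
	s := &Service{Name: "${BASE}-db"}
	s.Canonicalize("example", "cache", "redis", "platform")
	fmt.Println(s.Name, s.Provider, s.Namespace)
}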
func TestService_Validate(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
inputService *Service
expectedError bool
expectedErrorContains string
name string
}{
{
inputService: &Service{
Name: "testservice",
},
expectedError: false,
name: "base service",
},
{
inputService: &Service{
Name: "testservice",
Connect: &ConsulConnect{
Native: true,
},
},
expectedError: true,
expectedErrorContains: "Connect Native and requires setting the task",
name: "Native Connect without task name",
},
{
inputService: &Service{
Name: "testservice",
TaskName: "testtask",
Connect: &ConsulConnect{
Native: true,
},
},
expectedError: false,
name: "Native Connect with task name",
},
{
inputService: &Service{
Name: "testservice",
TaskName: "testtask",
Connect: &ConsulConnect{
Native: true,
SidecarService: &ConsulSidecarService{},
},
},
expectedError: true,
expectedErrorContains: "Consul Connect must be exclusively native",
name: "Native Connect with Sidecar",
},
{
inputService: &Service{
Name: "testservice",
Provider: "nomad",
Checks: []*ServiceCheck{
{
Name: "servicecheck",
},
},
},
expectedError: true,
expectedErrorContains: "Service with provider nomad cannot include Check blocks",
name: "provider nomad with checks",
},
{
inputService: &Service{
Name: "testservice",
Provider: "nomad",
Connect: &ConsulConnect{
Native: true,
},
},
expectedError: true,
expectedErrorContains: "Service with provider nomad cannot include Connect blocks",
name: "provider nomad with connect",
},
{
inputService: &Service{
Name: "testservice",
Provider: "nomad",
},
expectedError: false,
name: "provider nomad valid",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tc.inputService.Canonicalize("testjob", "testgroup", "testtask", "testnamespace")
err := tc.inputService.Validate()
if tc.expectedError {
assert.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedErrorContains)
} else {
assert.NoError(t, err)
}
})
}
}
func TestService_Equals(t *testing.T) {
ci.Parallel(t)
s := Service{
Name: "testservice",
}
s.Canonicalize("testjob", "testgroup", "testtask", "default")
o := s.Copy()
// Base service should be equal to copy of itself
require.True(t, s.Equals(o))
// create a helper to assert a diff and reset the struct
assertDiff := func() {
require.False(t, s.Equals(o))
o = s.Copy()
require.True(t, s.Equals(o), "bug in copy")
}
// Changing any field should cause inequality
o.Name = "diff"
assertDiff()
o.PortLabel = "diff"
assertDiff()
o.AddressMode = AddressModeDriver
assertDiff()
o.Tags = []string{"diff"}
assertDiff()
o.CanaryTags = []string{"diff"}
assertDiff()
o.Checks = []*ServiceCheck{{Name: "diff"}}
assertDiff()
o.Connect = &ConsulConnect{Native: true}
assertDiff()
o.EnableTagOverride = true
assertDiff()
o.Provider = "nomad"
assertDiff()
}
func TestJob_ExpandServiceNames(t *testing.T) {
ci.Parallel(t)
j := &Job{
Name: "my-job",
TaskGroups: []*TaskGroup{
{
Name: "web",
Tasks: []*Task{
{
Name: "frontend",
Services: []*Service{
{
Name: "${BASE}-default",
},
{
Name: "jmx",
},
},
},
},
},
{
Name: "admin",
Tasks: []*Task{
{
Name: "admin-web",
},
},
},
},
}

j.Canonicalize()

service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
if service1Name != "my-job-web-frontend-default" {
t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
}
service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
if service2Name != "jmx" {
t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
}
}
func TestJob_CombinedTaskMeta(t *testing.T) {
ci.Parallel(t)
j := &Job{
Meta: map[string]string{
"job_test": "job",
"group_test": "job",
"task_test": "job",
},
TaskGroups: []*TaskGroup{
{
Name: "group",
Meta: map[string]string{
"group_test": "group",
"task_test": "group",
},
Tasks: []*Task{
{
Name: "task",
Meta: map[string]string{
"task_test": "task",
},
},
},
},
},
}
require := require.New(t)
require.EqualValues(map[string]string{
"job_test": "job",
"group_test": "group",
"task_test": "task",
}, j.CombinedTaskMeta("group", "task"))
require.EqualValues(map[string]string{
"job_test": "job",
"group_test": "group",
"task_test": "group",
}, j.CombinedTaskMeta("group", ""))
require.EqualValues(map[string]string{
"job_test": "job",
"group_test": "job",
"task_test": "job",
}, j.CombinedTaskMeta("", "task"))
}
func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
ci.Parallel(t)

// Create a config that is enabled but with no spec or spec type specified.
p := &PeriodicConfig{Enabled: true}
if err := p.Validate(); err == nil {
t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
}
// Create a config that is enabled, with a spec but no type specified.
p = &PeriodicConfig{Enabled: true, Spec: "foo"}
if err := p.Validate(); err == nil {
t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
}
// Create a config that is enabled, with a spec type but no spec specified.
p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
if err := p.Validate(); err == nil {
t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
}
// Create a config that is enabled, with a bad time zone.
p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
}
}

func TestPeriodicConfig_InvalidCron(t *testing.T) {
ci.Parallel(t)

specs := []string{"foo", "* *", "@foo"}
for _, spec := range specs {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
p.Canonicalize()
if err := p.Validate(); err == nil {
t.Fatal("Invalid cron spec")
}
}
}
func TestPeriodicConfig_ValidCron(t *testing.T) {
ci.Parallel(t)
specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
for _, spec := range specs {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
p.Canonicalize()
if err := p.Validate(); err != nil {
t.Fatal("Passed valid cron")
}
}
}
func TestPeriodicConfig_NextCron(t *testing.T) {
ci.Parallel(t)
from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
cases := []struct {
spec string
nextTime time.Time
errorMsg string
}{
{
spec: "0 0 29 2 * 1980",
nextTime: time.Time{},
},
{
spec: "*/5 * * * *",
nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
},
{
spec: "1 15-0 *",
nextTime: time.Time{},
errorMsg: "failed parsing cron expression",
},
}
for i, c := range cases {
t.Run(fmt.Sprintf("case: %d: %s", i, c.spec), func(t *testing.T) {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec}
p.Canonicalize()
n, err := p.Next(from)
require.Equal(t, c.nextTime, n)
if c.errorMsg == "" {
require.NoError(t, err)
} else {
require.Error(t, err)
require.Contains(t, err.Error(), c.errorMsg)
}
})
}
}

func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
ci.Parallel(t)

zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
for _, zone := range zones {
p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
p.Canonicalize()
if err := p.Validate(); err != nil {
t.Fatalf("Valid tz errored: %v", err)
}
}
}
func TestPeriodicConfig_DST(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
p := &PeriodicConfig{
Enabled: true,
SpecType: PeriodicSpecCron,
Spec: "0 2 11-13 3 * 2017",
TimeZone: "America/Los_Angeles",
}
p.Canonicalize()
t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
// E1 is an 8 hour adjustment (UTC-8 before the DST change), E2 is a 7 hour adjustment (UTC-7 after it)
e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC)

n1, err := p.Next(t1)
require.Nil(err)

n2, err := p.Next(t2)
require.Nil(err)

require.Equal(e1, n1.UTC())
require.Equal(e2, n2.UTC())
}
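// An illustrative sketch of the DST arithmetic asserted above: 1 AM Pacific on
// March 11, 2017 is still UTC-8, so the next 2 AM firing lands at 10:00 UTC;
// after the March 12 transition the offset becomes UTC-7. Mirrors e1 above.
func ExamplePeriodicConfig_Next() {
	p := &PeriodicConfig{
		Enabled:  true,
		SpecType: PeriodicSpecCron,
		Spec:     "0 2 11-13 3 * 2017",
		TimeZone: "America/Los_Angeles",
	}
	p.Canonicalize()
	next, _ := p.Next(time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location))
	fmt.Println(next.UTC()) // 2017-03-11 10:00:00 +0000 UTC
}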
func TestTaskLifecycleConfig_Validate(t *testing.T) {
ci.Parallel(t)
testCases := []struct {
name string
tlc *TaskLifecycleConfig
err error
}{
{
name: "prestart completed",
tlc: &TaskLifecycleConfig{
Hook: "prestart",
Sidecar: false,
},
err: nil,
},
{
name: "prestart running",
tlc: &TaskLifecycleConfig{
Hook: "prestart",
Sidecar: true,
},
err: nil,
},
{
name: "no hook",
tlc: &TaskLifecycleConfig{
Sidecar: true,
},
err: fmt.Errorf("no lifecycle hook provided"),
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.tlc.Validate()
if tc.err != nil {
require.Error(t, err)
require.Contains(t, err.Error(), tc.err.Error())
} else {
require.Nil(t, err)
}
})
}
}
func TestRestartPolicy_Validate(t *testing.T) {
ci.Parallel(t)
// Policy with acceptable restart options passes
p := &RestartPolicy{
Mode: RestartPolicyModeFail,
Attempts: 0,
Interval: 5 * time.Second,
}
if err := p.Validate(); err != nil {
t.Fatalf("err: %v", err)
}
// Policy with ambiguous restart options fails
p = &RestartPolicy{
Mode: RestartPolicyModeDelay,
Attempts: 0,
Interval: 5 * time.Second,
}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
t.Fatalf("expect ambiguity error, got: %v", err)
}
// Bad policy mode fails
p = &RestartPolicy{
Mode: "nope",
Attempts: 1,
Interval: 5 * time.Second,
}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
t.Fatalf("expect mode error, got: %v", err)
}
// Fails when attempts*delay does not fit inside interval
p = &RestartPolicy{
Mode: RestartPolicyModeDelay,
Attempts: 3,
Delay: 5 * time.Second,
Interval: 5 * time.Second,
}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
t.Fatalf("expect restart interval error, got: %v", err)
}
// Fails when interval is too small
p = &RestartPolicy{
Mode: RestartPolicyModeDelay,
Attempts: 3,
Delay: 5 * time.Second,
Interval: 2 * time.Second,
}
if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
t.Fatalf("expect interval too small error, got: %v", err)
}
}
func TestReschedulePolicy_Validate(t *testing.T) {
ci.Parallel(t)
type testCase struct {
desc string
ReschedulePolicy *ReschedulePolicy
errors []error
}
testCases := []testCase{
{
desc: "Nil",
},
{
desc: "Disabled",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 0,
Interval: 0 * time.Second},
},
{
desc: "Disabled",
ReschedulePolicy: &ReschedulePolicy{
Attempts: -1,
Interval: 5 * time.Minute},
},
{
desc: "Valid Linear Delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Interval: 5 * time.Minute,
Delay: 10 * time.Second,
DelayFunction: "constant"},
},
{
desc: "Valid Exponential Delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 5,
Interval: 1 * time.Hour,
Delay: 30 * time.Second,
MaxDelay: 5 * time.Minute,
DelayFunction: "exponential"},
},
{
desc: "Valid Fibonacci Delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 5,
Interval: 15 * time.Minute,
Delay: 10 * time.Second,
MaxDelay: 5 * time.Minute,
DelayFunction: "fibonacci"},
},
{
desc: "Invalid delay function",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Interval: 1 * time.Second,
DelayFunction: "blah"},
errors: []error{
fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
},
},
{
desc: "Invalid delay ceiling",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Interval: 8 * time.Second,
DelayFunction: "exponential",
Delay: 15 * time.Second,
MaxDelay: 5 * time.Second},
errors: []error{
fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
15*time.Second, 5*time.Second),
},
},
{
desc: "Invalid delay and interval",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Interval: 1 * time.Second,
DelayFunction: "constant"},
errors: []error{
fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
},
}, {
// Should suggest 3h20m (200m) as the interval
desc: "Invalid Attempts - linear delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 10,
Interval: 1 * time.Hour,
Delay: 20 * time.Minute,
DelayFunction: "constant",
},
errors: []error{
fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
200*time.Minute, 10),
},
},
{
// Should suggest 4h40m as the interval
// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
desc: "Invalid Attempts - exponential delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 10,
Interval: 30 * time.Minute,
Delay: 5 * time.Minute,
MaxDelay: 40 * time.Minute,
DelayFunction: "exponential",
},
errors: []error{
fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
"exponential", 40*time.Minute),
fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
280*time.Minute, 10),
},
},
{
// Should suggest 8h as the interval
// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
desc: "Invalid Attempts - fibonacci delay",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 10,
Interval: 1 * time.Hour,
Delay: 20 * time.Minute,
MaxDelay: 80 * time.Minute,
DelayFunction: "fibonacci",
},
errors: []error{
fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
"fibonacci", 80*time.Minute),
fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
480*time.Minute, 10),
},
},
{
desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Unlimited: true,
DelayFunction: "exponential",
Delay: 5 * time.Minute,
MaxDelay: 1 * time.Hour,
},
errors: []error{
fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
},
},
{
desc: "Invalid Unlimited config",
ReschedulePolicy: &ReschedulePolicy{
Attempts: 1,
Interval: 1 * time.Second,
Unlimited: true,
DelayFunction: "exponential",
},
errors: []error{
fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
},
},
{
desc: "Valid Unlimited config",
ReschedulePolicy: &ReschedulePolicy{
Unlimited: true,
DelayFunction: "exponential",
Delay: 5 * time.Second,
MaxDelay: 1 * time.Hour,
},
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
require := require.New(t)
gotErr := tc.ReschedulePolicy.Validate()
if tc.errors != nil {
// Validate all errors
for _, err := range tc.errors {
require.Contains(gotErr.Error(), err.Error())
}
} else {
require.Nil(gotErr)
}
})
}
}
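// A back-of-the-envelope helper (an illustrative assumption, not the actual
// validation code) for where the "Set the interval to at least ..." hints in
// the constant-delay cases above come from: the interval has to cover
// attempts * delay, e.g. 10 attempts * 20m = 200m (3h20m).
func suggestedConstantInterval(attempts int, delay time.Duration) time.Duration {
	return time.Duration(attempts) * delay
}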
func TestAllocation_Index(t *testing.T) {
ci.Parallel(t)
a1 := Allocation{
Name: "example.cache[1]",
TaskGroup: "cache",
JobID: "example",
Job: &Job{
ID: "example",
TaskGroups: []*TaskGroup{{Name: "cache"}}},
}
e1 := uint(1)
a2 := a1.Copy()
a2.Name = "example.cache[713127]"
e2 := uint(713127)
if a1.Index() != e1 || a2.Index() != e2 {
t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
}
}
func TestTaskArtifact_Validate_Source(t *testing.T) {
ci.Parallel(t)

valid := &TaskArtifact{GetterSource: "google.com"}
if err := valid.Validate(); err != nil {
t.Fatalf("unexpected error: %v", err)
}
}
func TestTaskArtifact_Validate_Dest(t *testing.T) {
ci.Parallel(t)
valid := &TaskArtifact{GetterSource: "google.com"}
if err := valid.Validate(); err != nil {
t.Fatalf("unexpected error: %v", err)
}
valid.RelativeDest = "local/"
if err := valid.Validate(); err != nil {
t.Fatalf("unexpected error: %v", err)
}
valid.RelativeDest = "local/.."
if err := valid.Validate(); err != nil {
t.Fatalf("unexpected error: %v", err)
}
valid.RelativeDest = "local/../../.."
if err := valid.Validate(); err == nil {
t.Fatalf("expected error: %v", err)
}
}
// TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
// fields change.
func TestTaskArtifact_Hash(t *testing.T) {
ci.Parallel(t)
cases := []TaskArtifact{
{},
{
GetterSource: "a",
},
{
GetterSource: "b",
},
{
GetterSource: "b",
GetterOptions: map[string]string{"c": "c"},
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "d",
},
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "e",
},
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "e",
},
GetterMode: "f",
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "e",
},
GetterMode: "g",
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "e",
},
GetterMode: "g",
RelativeDest: "h",
},
{
GetterSource: "b",
GetterOptions: map[string]string{
"c": "c",
"d": "e",
},
GetterMode: "g",
RelativeDest: "i",
},
}
// Map of hash to source
hashes := make(map[string]TaskArtifact, len(cases))
for _, tc := range cases {
h := tc.Hash()
// Hash should be deterministic
require.Equal(t, h, tc.Hash())
// Hash should be unique
if orig, ok := hashes[h]; ok {
require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
pretty.Sprint(tc), pretty.Sprint(orig),
)
}
hashes[h] = tc
}
require.Len(t, hashes, len(cases))
}
func TestAllocation_ShouldMigrate(t *testing.T) {
ci.Parallel(t)
alloc := Allocation{
PreviousAllocation: "123",
TaskGroup: "foo",
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
EphemeralDisk: &EphemeralDisk{
Migrate: true,
Sticky: true,
},
},
},
},
}
if !alloc.ShouldMigrate() {
t.Fatalf("bad: %v", alloc)
}
alloc1 := Allocation{
PreviousAllocation: "123",
TaskGroup: "foo",
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
EphemeralDisk: &EphemeralDisk{},
},
},
},
}
if alloc1.ShouldMigrate() {
t.Fatalf("bad: %v", alloc)
}
alloc2 := Allocation{
PreviousAllocation: "123",
TaskGroup: "foo",
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
EphemeralDisk: &EphemeralDisk{
Sticky: false,
Migrate: true,
},
},
},
},
}
if alloc2.ShouldMigrate() {
t.Fatalf("bad: %v", alloc)
}
alloc3 := Allocation{
PreviousAllocation: "123",
TaskGroup: "foo",
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
},
},
},
}
if alloc3.ShouldMigrate() {
t.Fatalf("bad: %v", alloc)
}
// No previous
alloc4 := Allocation{
TaskGroup: "foo",
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "foo",
EphemeralDisk: &EphemeralDisk{
Migrate: true,
Sticky: true,
},
},
},
},
}
if alloc4.ShouldMigrate() {
t.Fatalf("bad: %v", alloc4)
}
}
func TestTaskArtifact_Validate_Checksum(t *testing.T) {
ci.Parallel(t)
cases := []struct {
Input *TaskArtifact
Err bool
}{
{
&TaskArtifact{
GetterSource: "foo.com",
GetterOptions: map[string]string{
"checksum": "no-type",
},
},
true,
},
{
&TaskArtifact{
GetterSource: "foo.com",
GetterOptions: map[string]string{
"checksum": "md5:toosmall",
},
},
true,
},
{
&TaskArtifact{
GetterSource: "foo.com",
GetterOptions: map[string]string{
"checksum": "invalid:type",
},
},
true,
},
{
&TaskArtifact{
GetterSource: "foo.com",
GetterOptions: map[string]string{
"checksum": "md5:${ARTIFACT_CHECKSUM}",
},
},
false,
},
}
for i, tc := range cases {
err := tc.Input.Validate()
if (err != nil) != tc.Err {
t.Fatalf("case %d: %v", i, err)
continue
}
}
}
func TestPlan_NormalizeAllocations(t *testing.T) {
ci.Parallel(t)
plan := &Plan{
NodeUpdate: make(map[string][]*Allocation),
NodePreemptions: make(map[string][]*Allocation),
}
stoppedAlloc := MockAlloc()
desiredDesc := "Desired desc"
plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost, "followup-eval-id")
preemptedAlloc := MockAlloc()
preemptingAllocID := uuid.Generate()
plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID)
plan.NormalizeAllocations()
actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0]
expectedStoppedAlloc := &Allocation{
ID: stoppedAlloc.ID,
DesiredDescription: desiredDesc,
ClientStatus: AllocClientStatusLost,
FollowupEvalID: "followup-eval-id",
}
assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc)
actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0]
expectedPreemptedAlloc := &Allocation{
ID: preemptedAlloc.ID,
PreemptedByAllocation: preemptingAllocID,
}
assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc)
}
func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
ci.Parallel(t)
plan := &Plan{
NodeUpdate: make(map[string][]*Allocation),
}
alloc := MockAlloc()
desiredDesc := "Desired desc"
plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost, "")
expectedAlloc := new(Allocation)
*expectedAlloc = *alloc
expectedAlloc.DesiredDescription = desiredDesc
expectedAlloc.DesiredStatus = AllocDesiredStatusStop
expectedAlloc.ClientStatus = AllocClientStatusLost
expectedAlloc.Job = nil
expectedAlloc.AllocStates = []*AllocState{{
Field: AllocStateFieldClientStatus,
Value: "lost",
}}
// This value is set to time.Now() in AppendStoppedAlloc, so clear it
appendedAlloc := plan.NodeUpdate[alloc.NodeID][0]
appendedAlloc.AllocStates[0].Time = time.Time{}
assert.Equal(t, expectedAlloc, appendedAlloc)
assert.Equal(t, alloc.Job, plan.Job)
}
func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
ci.Parallel(t)
plan := &Plan{
NodePreemptions: make(map[string][]*Allocation),
}
alloc := MockAlloc()
preemptingAllocID := uuid.Generate()
plan.AppendPreemptedAlloc(alloc, preemptingAllocID)
appendedAlloc := plan.NodePreemptions[alloc.NodeID][0]
expectedAlloc := &Allocation{
ID: alloc.ID,
PreemptedByAllocation: preemptingAllocID,
JobID: alloc.JobID,
Namespace: alloc.Namespace,
DesiredStatus: AllocDesiredStatusEvict,
DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
AllocatedResources: alloc.AllocatedResources,
TaskResources: alloc.TaskResources,
SharedResources: alloc.SharedResources,
}
assert.Equal(t, expectedAlloc, appendedAlloc)
}
func TestAllocation_MsgPackTags(t *testing.T) {
ci.Parallel(t)
planType := reflect.TypeOf(Allocation{})
msgPackTags, _ := planType.FieldByName("_struct")
assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
}
func TestEvaluation_MsgPackTags(t *testing.T) {
ci.Parallel(t)
planType := reflect.TypeOf(Evaluation{})
msgPackTags, _ := planType.FieldByName("_struct")
assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
}
func TestAllocation_Terminated(t *testing.T) {
ci.Parallel(t)
type desiredState struct {
ClientStatus string
DesiredStatus string
Terminated bool
}
harness := []desiredState{
{
ClientStatus: AllocClientStatusPending,
DesiredStatus: AllocDesiredStatusStop,
Terminated: false,
},
{
ClientStatus: AllocClientStatusRunning,
DesiredStatus: AllocDesiredStatusStop,
Terminated: false,
},
{
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusStop,
Terminated: true,
},
{
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
Terminated: true,
},
}
for _, state := range harness {
alloc := Allocation{}
alloc.DesiredStatus = state.DesiredStatus
alloc.ClientStatus = state.ClientStatus
if alloc.Terminated() != state.Terminated {
t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
}
}
}
func TestAllocation_ShouldReschedule(t *testing.T) {
ci.Parallel(t)
type testCase struct {
Desc string
FailTime time.Time
ClientStatus string
DesiredStatus string
ReschedulePolicy *ReschedulePolicy
RescheduleTrackers []*RescheduleEvent
ShouldReschedule bool
}
fail := time.Now()
harness := []testCase{
{
Desc: "Reschedule when desired state is stop",
ClientStatus: AllocClientStatusPending,
DesiredStatus: AllocDesiredStatusStop,
FailTime: fail,
ReschedulePolicy: nil,
ShouldReschedule: false,
},
{
Desc: "Disabled rescheduling",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
ShouldReschedule: false,
},
{
Desc: "Reschedule when client status is complete",
ClientStatus: AllocClientStatusComplete,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: nil,
ShouldReschedule: false,
},
{
Desc: "Reschedule with nil reschedule policy",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: nil,
ShouldReschedule: false,
},
{
Desc: "Reschedule with unlimited and attempts >0",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
ShouldReschedule: true,
},
{
Desc: "Reschedule when client status is complete",
ClientStatus: AllocClientStatusComplete,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: nil,
ShouldReschedule: false,
},
{
Desc: "Reschedule with policy when client status complete",
ClientStatus: AllocClientStatusComplete,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
ShouldReschedule: false,
},
{
Desc: "Reschedule with no previous attempts",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
ShouldReschedule: true,
},
{
Desc: "Reschedule with leftover attempts",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
FailTime: fail,
RescheduleTrackers: []*RescheduleEvent{
{
RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
},
},
ShouldReschedule: true,
},
{
Desc: "Reschedule with too old previous attempts",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
RescheduleTrackers: []*RescheduleEvent{
{
RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
},
},
ShouldReschedule: true,
},
{
Desc: "Reschedule with no leftover attempts",
ClientStatus: AllocClientStatusFailed,
DesiredStatus: AllocDesiredStatusRun,
FailTime: fail,
ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
RescheduleTrackers: []*RescheduleEvent{
{
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
},
{
RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
},
},
ShouldReschedule: false,
},
}
for _, state := range harness {
alloc := Allocation{}
alloc.DesiredStatus = state.DesiredStatus
alloc.ClientStatus = state.ClientStatus
alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers}
t.Run(state.Desc, func(t *testing.T) {
if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
}
})
}
}
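// TestAllocation_LastEventTime verifies that LastEventTime returns the latest
// task FinishedAt across the allocation's task states, falling back to the
// allocation's ModifyTime when no task has finished.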
func TestAllocation_LastEventTime(t *testing.T) {
ci.Parallel(t)
type testCase struct {
desc string
taskState map[string]*TaskState
expectedLastEventTime time.Time
}
t1 := time.Now().UTC()
testCases := []testCase{
{
desc: "nil task state",
expectedLastEventTime: t1,
},
{
desc: "empty task state",
taskState: make(map[string]*TaskState),
expectedLastEventTime: t1,
},
{
desc: "Finished At not set",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour)}},
expectedLastEventTime: t1,
},
{
desc: "One finished ",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour),
FinishedAt: t1.Add(-1 * time.Hour)}},
expectedLastEventTime: t1.Add(-1 * time.Hour),
},
{
desc: "Multiple task groups",
taskState: map[string]*TaskState{"foo": {State: "start",
StartedAt: t1.Add(-2 * time.Hour),
FinishedAt: t1.Add(-1 * time.Hour)},
"bar": {State: "start",
StartedAt: t1.Add(-2 * time.Hour),
FinishedAt: t1.Add(-40 * time.Minute)}},
expectedLastEventTime: t1.Add(-40 * time.Minute),
},
{
desc: "No finishedAt set, one task event, should use modify time",
taskState: map[string]*TaskState{"foo": {
State: "run",
StartedAt: t1.Add(-2 * time.Hour),
Events: []*TaskEvent{
{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
}},
},
expectedLastEventTime: t1,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
alloc.TaskStates = tc.taskState
require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
})
}
}
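// TestAllocation_NextDelay exercises NextRescheduleTime across the constant,
// exponential, and fibonacci delay functions. As a rough sketch of the
// expected backoff with Delay=5s and MaxDelay=90s (the values most cases use),
// exponential grows 5s, 10s, 20s, 40s, 80s before being capped, while
// fibonacci grows 5s, 5s, 10s, 15s, 25s, 40s; a sufficiently long gap since
// the last failure resets the delay back to the base value.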
func TestAllocation_NextDelay(t *testing.T) {
ci.Parallel(t)
type testCase struct {
desc string
reschedulePolicy *ReschedulePolicy
alloc *Allocation
expectedRescheduleTime time.Time
expectedRescheduleEligible bool
}
now := time.Now()
testCases := []testCase{
{
desc: "Allocation hasn't failed yet",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
},
alloc: &Allocation{},
expectedRescheduleTime: time.Time{},
expectedRescheduleEligible: false,
},
{
desc: "Allocation has no reschedule policy",
alloc: &Allocation{},
expectedRescheduleTime: time.Time{},
expectedRescheduleEligible: false,
},
{
desc: "Allocation lacks task state",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
Unlimited: true,
},
alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
expectedRescheduleTime: now.UTC().Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "linear delay, unlimited restarts, no reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "dead",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "linear delay with reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
Interval: 10 * time.Minute,
Attempts: 2,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{{
RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
Delay: 5 * time.Second,
}},
}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "linear delay with reschedule tracker, attempts exhausted",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "constant",
Delay: 5 * time.Second,
Interval: 10 * time.Minute,
Attempts: 2,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
Delay: 5 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: false,
},
{
desc: "exponential delay - no reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "exponential",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "exponential delay with reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "exponential",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 20 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "exponential delay with delay ceiling reached",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "exponential",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-15 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 20 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 40 * time.Second,
},
{
RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
Delay: 80 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second),
expectedRescheduleEligible: true,
},
{
// Test case where most recent reschedule ran longer than delay ceiling
desc: "exponential delay, delay ceiling reset condition met",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "exponential",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-15 * time.Minute)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 20 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 40 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 80 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 90 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 90 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay - no reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay with reschedule tracker",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
Delay: 5 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay with more events",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 90 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-2 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 15 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 25 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay with delay ceiling reached",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 50 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-15 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 15 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 25 * time.Second,
},
{
RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
Delay: 40 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay with delay reset condition met",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 50 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-5 * time.Minute)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 15 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 25 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 40 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
{
desc: "fibonacci delay with the most recent event that reset delay value",
reschedulePolicy: &ReschedulePolicy{
DelayFunction: "fibonacci",
Delay: 5 * time.Second,
MaxDelay: 50 * time.Second,
Unlimited: true,
},
alloc: &Allocation{
ClientStatus: AllocClientStatusFailed,
TaskStates: map[string]*TaskState{"foo": {State: "start",
StartedAt: now.Add(-1 * time.Hour),
FinishedAt: now.Add(-5 * time.Second)}},
RescheduleTracker: &RescheduleTracker{
Events: []*RescheduleEvent{
{
RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 5 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 10 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 15 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 25 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 40 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
Delay: 50 * time.Second,
},
{
RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
Delay: 5 * time.Second,
},
},
}},
expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second),
expectedRescheduleEligible: true,
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
require := require.New(t)
j := testJob()
if tc.reschedulePolicy != nil {
j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
}
tc.alloc.Job = j
tc.alloc.TaskGroup = j.TaskGroups[0].Name
reschedTime, allowed := tc.alloc.NextRescheduleTime()
require.Equal(tc.expectedRescheduleEligible, allowed)
require.Equal(tc.expectedRescheduleTime, reschedTime)
})
}
}
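// TestAllocation_WaitClientStop covers ShouldClientStop and WaitClientStop for
// the stop_after_client_disconnect group setting, including an allocation that
// has transitioned to the lost client status.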
func TestAllocation_WaitClientStop(t *testing.T) {
ci.Parallel(t)
type testCase struct {
desc string
stop time.Duration
status string
expectedShould bool
expectedRescheduleTime time.Time
}
now := time.Now().UTC()
testCases := []testCase{
{
desc: "running",
stop: 2 * time.Second,
status: AllocClientStatusRunning,
expectedShould: true,
},
{
desc: "no stop_after_client_disconnect",
status: AllocClientStatusLost,
expectedShould: false,
},
{
desc: "stop",
status: AllocClientStatusLost,
stop: 2 * time.Second,
expectedShould: true,
expectedRescheduleTime: now.Add((2 + 5) * time.Second),
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
j := testJob()
a := &Allocation{
ClientStatus: tc.status,
Job: j,
TaskStates: map[string]*TaskState{},
}
if tc.status == AllocClientStatusLost {
a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost)
}
j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
a.TaskGroup = j.TaskGroups[0].Name
require.Equal(t, tc.expectedShould, a.ShouldClientStop())
if !tc.expectedShould || tc.status != AllocClientStatusLost {
return
}
// the reschedTime is close to the expectedRescheduleTime
reschedTime := a.WaitClientStop()
e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix()
require.Less(t, e, int64(2))
})
}
}
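// TestAllocation_Canonicalize_Old asserts that Canonicalize upgrades an alloc
// using the legacy TaskResources/SharedResources fields into the newer
// AllocatedResources schema.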
func TestAllocation_Canonicalize_Old(t *testing.T) {
ci.Parallel(t)
alloc := MockAlloc()
alloc.AllocatedResources = nil
alloc.TaskResources = map[string]*Resources{
"web": {
CPU: 500,
MemoryMB: 256,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []Port{{Label: "http", Value: 9876}},
},
},
},
}
alloc.SharedResources = &Resources{
DiskMB: 150,
}
alloc.Canonicalize()
expected := &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 500,
},
Memory: AllocatedMemoryResources{
MemoryMB: 256,
},
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []Port{{Label: "http", Value: 9876}},
},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 150,
},
}
require.Equal(t, expected, alloc.AllocatedResources)
}
// TestAllocation_Canonicalize_New asserts that an alloc with latest
// schema isn't modified with Canonicalize
func TestAllocation_Canonicalize_New(t *testing.T) {
ci.Parallel(t)
alloc := MockAlloc()
copy := alloc.Copy()
alloc.Canonicalize()
require.Equal(t, copy, alloc)
}
func TestRescheduleTracker_Copy(t *testing.T) {
ci.Parallel(t)
type testCase struct {
original *RescheduleTracker
expected *RescheduleTracker
}
cases := []testCase{
{nil, nil},
{&RescheduleTracker{Events: []*RescheduleEvent{
{RescheduleTime: 2,
PrevAllocID: "12",
PrevNodeID: "12",
Delay: 30 * time.Second},
}}, &RescheduleTracker{Events: []*RescheduleEvent{
{RescheduleTime: 2,
PrevAllocID: "12",
PrevNodeID: "12",
Delay: 30 * time.Second},
}}},
}
for _, tc := range cases {
if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
t.Fatalf("expected %v but got %v", *tc.expected, *got)
}
}
}
func TestVault_Validate(t *testing.T) {
ci.Parallel(t)
v := &Vault{
Env: true,
ChangeMode: VaultChangeModeNoop,
}
if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
t.Fatalf("Expected policy list empty error")
}
v.Policies = []string{"foo", "root"}
v.ChangeMode = VaultChangeModeSignal
err := v.Validate()
if err == nil {
t.Fatalf("Expected validation errors")
}
if !strings.Contains(err.Error(), "Signal must") {
t.Fatalf("Expected signal empty error")
}
if !strings.Contains(err.Error(), "root") {
t.Fatalf("Expected root error")
}
}
func TestVault_Copy(t *testing.T) {
v := &Vault{
Policies: []string{"policy1", "policy2"},
Namespace: "ns1",
Env: false,
ChangeMode: "noop",
ChangeSignal: "SIGKILL",
EntityAlias: "alias1",
}
// Copy and modify.
vc := v.Copy()
vc.Policies[0] = "policy0"
vc.Namespace = "ns2"
vc.Env = true
vc.ChangeMode = "signal"
vc.ChangeSignal = "SIGHUP"
vc.EntityAlias = "alias2"
require.NotEqual(t, v, vc)
}
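// TestVault_Canonicalize verifies that Canonicalize upper-cases the change
// signal and defaults the change mode to restart.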
func TestVault_Canonicalize(t *testing.T) {
v := &Vault{
ChangeSignal: "sighup",
}
v.Canonicalize()
require.Equal(t, "SIGHUP", v.ChangeSignal)
require.Equal(t, VaultChangeModeRestart, v.ChangeMode)
}
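// TestParameterizedJobConfig_Validate checks that an unknown payload option is
// rejected and that the required and optional meta key sets must be disjoint.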
func TestParameterizedJobConfig_Validate(t *testing.T) {
ci.Parallel(t)
d := &ParameterizedJobConfig{
Payload: "foo",
}
if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
t.Fatalf("Expected unknown payload requirement: %v", err)
}
d.Payload = DispatchPayloadOptional
d.MetaOptional = []string{"foo", "bar"}
d.MetaRequired = []string{"bar", "baz"}
if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
t.Fatalf("Expected meta not being disjoint error: %v", err)
}
}
func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
ci.Parallel(t)
job := testJob()
job.ParameterizedJob = &ParameterizedJobConfig{
Payload: DispatchPayloadOptional,
}
job.Type = JobTypeSystem
if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
t.Fatalf("Expected bad scheduler tpye: %v", err)
}
}
func TestJobConfig_Validate_StopAfterClientDisconnect(t *testing.T) {
ci.Parallel(t)
// Setup a system Job with stop_after_client_disconnect set, which is invalid
job := testJob()
job.Type = JobTypeSystem
stop := 1 * time.Minute
job.TaskGroups[0].StopAfterClientDisconnect = &stop
err := job.Validate()
require.Error(t, err)
require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs")
// Modify the job to a batch job with an invalid stop_after_client_disconnect value
job.Type = JobTypeBatch
invalid := -1 * time.Minute
job.TaskGroups[0].StopAfterClientDisconnect = &invalid
err = job.Validate()
require.Error(t, err)
require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value")
// Modify the job to a batch job with a valid stop_after_client_disconnect value
job.Type = JobTypeBatch
job.TaskGroups[0].StopAfterClientDisconnect = &stop
err = job.Validate()
require.NoError(t, err)
}
func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
ci.Parallel(t)
d := &ParameterizedJobConfig{}
d.Canonicalize()
if d.Payload != DispatchPayloadOptional {
t.Fatalf("Canonicalize failed")
}
}
func TestDispatchPayloadConfig_Validate(t *testing.T) {
ci.Parallel(t)
d := &DispatchPayloadConfig{
File: "foo",
}
// task/local/haha
if err := d.Validate(); err != nil {
t.Fatalf("bad: %v", err)
}
// task/haha
d.File = "../haha"
if err := d.Validate(); err != nil {
t.Fatalf("bad: %v", err)
}
// ../haha
d.File = "../../../haha"
if err := d.Validate(); err == nil {
t.Fatalf("bad: %v", err)
}
}
func TestScalingPolicy_Canonicalize(t *testing.T) {
ci.Parallel(t)
cases := []struct {
name string
input *ScalingPolicy
expected *ScalingPolicy
}{
{
name: "empty policy",
input: &ScalingPolicy{},
expected: &ScalingPolicy{Type: ScalingPolicyTypeHorizontal},
},
{
name: "policy with type",
input: &ScalingPolicy{Type: "other-type"},
expected: &ScalingPolicy{Type: "other-type"},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require := require.New(t)
c.input.Canonicalize()
require.Equal(c.expected, c.input)
})
}
}
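// TestScalingPolicy_Validate table-tests ScalingPolicy.Validate: the type must
// be set and known, min/max counts must be non-negative with min <= max, and a
// horizontal policy must target a namespace, job, and group.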
func TestScalingPolicy_Validate(t *testing.T) {
ci.Parallel(t)
type testCase struct {
name string
input *ScalingPolicy
expectedErr string
}
cases := []testCase{
{
name: "full horizontal policy",
input: &ScalingPolicy{
Policy: map[string]interface{}{
"key": "value",
},
Type: ScalingPolicyTypeHorizontal,
Min: 5,
Max: 5,
Enabled: true,
Target: map[string]string{
ScalingTargetNamespace: "my-namespace",
ScalingTargetJob: "my-job",
ScalingTargetGroup: "my-task-group",
},
},
},
{
name: "missing type",
input: &ScalingPolicy{},
expectedErr: "missing scaling policy type",
},
{
name: "invalid type",
input: &ScalingPolicy{
Type: "not valid",
},
expectedErr: `scaling policy type "not valid" is not valid`,
},
{
name: "min < 0",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: -1,
Max: 5,
},
expectedErr: "minimum count must be specified and non-negative",
},
{
name: "max < 0",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: 5,
Max: -1,
},
expectedErr: "maximum count must be specified and non-negative",
},
{
name: "min > max",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: 10,
Max: 0,
},
expectedErr: "maximum count must not be less than minimum count",
},
{
name: "min == max",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: 10,
Max: 10,
},
},
{
name: "min == 0",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: 0,
Max: 10,
},
},
{
name: "max == 0",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Min: 0,
Max: 0,
},
},
{
name: "horizontal missing namespace",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Target: map[string]string{
ScalingTargetJob: "my-job",
ScalingTargetGroup: "my-group",
},
},
expectedErr: "missing target namespace",
},
{
name: "horizontal missing job",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Target: map[string]string{
ScalingTargetNamespace: "my-namespace",
ScalingTargetGroup: "my-group",
},
},
expectedErr: "missing target job",
},
{
name: "horizontal missing group",
input: &ScalingPolicy{
Type: ScalingPolicyTypeHorizontal,
Target: map[string]string{
ScalingTargetNamespace: "my-namespace",
ScalingTargetJob: "my-job",
},
},
expectedErr: "missing target group",
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
require := require.New(t)
err := c.input.Validate()
if len(c.expectedErr) > 0 {
require.Error(err, c.expectedErr)
} else {
require.NoError(err)
}
})
}
}
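// TestIsRecoverable confirms that only non-nil errors explicitly wrapped with
// NewRecoverableError(err, true) are reported as recoverable.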
func TestIsRecoverable(t *testing.T) {
ci.Parallel(t)
if IsRecoverable(nil) {
t.Errorf("nil should not be recoverable")
}
if IsRecoverable(NewRecoverableError(nil, true)) {
t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
}
if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
t.Errorf("Custom errors should not be recoverable")
}
if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
t.Errorf("Explicitly unrecoverable errors should not be recoverable")
}
if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
t.Errorf("Explicitly recoverable errors *should* be recoverable")
}
}
func TestACLTokenValidate(t *testing.T) {
ci.Parallel(t)
tk := &ACLToken{}
// Missing a type
err := tk.Validate()
assert.NotNil(t, err)
if !strings.Contains(err.Error(), "client or management") {
t.Fatalf("bad: %v", err)
}
// Missing policies
tk.Type = ACLClientToken
err = tk.Validate()
assert.NotNil(t, err)
if !strings.Contains(err.Error(), "missing policies") {
t.Fatalf("bad: %v", err)
}
// Invalid policies
tk.Type = ACLManagementToken
tk.Policies = []string{"foo"}
err = tk.Validate()
assert.NotNil(t, err)
if !strings.Contains(err.Error(), "associated with policies") {
t.Fatalf("bad: %v", err)
}
// Name too long policies
tk.Name = ""
for i := 0; i < 8; i++ {
tk.Name += uuid.Generate()
}
tk.Policies = nil
err = tk.Validate()
assert.NotNil(t, err)
if !strings.Contains(err.Error(), "too long") {
t.Fatalf("bad: %v", err)
}
// Make it valid
tk.Name = "foo"
err = tk.Validate()
assert.Nil(t, err)
}
func TestACLTokenPolicySubset(t *testing.T) {
ci.Parallel(t)
2017-08-22 00:45:11 +00:00
tk := &ACLToken{
Type: ACLClientToken,
Policies: []string{"foo", "bar", "baz"},
}
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
assert.Equal(t, true, tk.PolicySubset([]string{}))
assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
assert.Equal(t, false, tk.PolicySubset([]string{"new"}))
tk = &ACLToken{
Type: ACLManagementToken,
}
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
assert.Equal(t, true, tk.PolicySubset([]string{}))
assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
}
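// TestACLTokenSetHash verifies that SetHash both stores and returns the token
// hash and that the hash changes when the policy list changes.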
func TestACLTokenSetHash(t *testing.T) {
ci.Parallel(t)
tk := &ACLToken{
Name: "foo",
Type: ACLClientToken,
Policies: []string{"foo", "bar"},
Global: false,
}
out1 := tk.SetHash()
assert.NotNil(t, out1)
assert.NotNil(t, tk.Hash)
assert.Equal(t, out1, tk.Hash)
tk.Policies = []string{"foo"}
out2 := tk.SetHash()
assert.NotNil(t, out2)
assert.NotNil(t, tk.Hash)
assert.Equal(t, out2, tk.Hash)
assert.NotEqual(t, out1, out2)
}
func TestACLPolicySetHash(t *testing.T) {
ci.Parallel(t)
ap := &ACLPolicy{
Name: "foo",
Description: "great policy",
Rules: "node { policy = \"read\" }",
}
out1 := ap.SetHash()
assert.NotNil(t, out1)
assert.NotNil(t, ap.Hash)
assert.Equal(t, out1, ap.Hash)
ap.Rules = "node { policy = \"write\" }"
out2 := ap.SetHash()
assert.NotNil(t, out2)
assert.NotNil(t, ap.Hash)
assert.Equal(t, out2, ap.Hash)
assert.NotEqual(t, out1, out2)
}
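// TestTaskEventPopulate checks that PopulateEventDisplayMessage fills in a
// human-readable display message for each task event type, preferring any
// message already present on the event.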
func TestTaskEventPopulate(t *testing.T) {
ci.Parallel(t)
prepopulatedEvent := NewTaskEvent(TaskSetup)
prepopulatedEvent.DisplayMessage = "Hola"
testcases := []struct {
event *TaskEvent
expectedMsg string
}{
{nil, ""},
{prepopulatedEvent, "Hola"},
{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
{NewTaskEvent(TaskStarted), "Task started by client"},
{NewTaskEvent(TaskReceived), "Task received by client"},
{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
{NewTaskEvent(TaskKilling), "Sent interrupt"},
{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
{NewTaskEvent(TaskKilled), "Task successfully killed"},
{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
{NewTaskEvent(TaskClientReconnected), "Client reconnected"},
{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
{NewTaskEvent("Unknown Type, No message"), ""},
{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
}
for _, tc := range testcases {
tc.event.PopulateEventDisplayMessage()
if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
}
}
}
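// TestNetworkResourcesEquals compares pairs of NetworkResource values that
// differ in IP, MBits, or reserved/dynamic port sets and asserts that Equals
// only reports true for identical resources.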
func TestNetworkResourcesEquals(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
var networkResourcesTest = []struct {
input []*NetworkResource
expected bool
errorMsg string
}{
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
},
true,
"Equal network resources should return true",
},
{
[]*NetworkResource{
{
IP: "10.0.0.0",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
},
false,
"Different IP addresses should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 40,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
},
false,
"Different MBits values should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
},
},
false,
"Different ReservedPorts lengths should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{},
},
},
false,
"Empty and non empty ReservedPorts values should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"notweb", 80, 0, ""}},
},
},
false,
"Different valued ReservedPorts values should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
},
},
false,
"Different DynamicPorts lengths should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{},
},
},
false,
"Empty and non empty DynamicPorts values should return false",
},
{
[]*NetworkResource{
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{{"web", 80, 0, ""}},
},
{
IP: "10.0.0.1",
MBits: 50,
DynamicPorts: []Port{{"notweb", 80, 0, ""}},
},
},
false,
"Different valued DynamicPorts values should return false",
},
}
for _, testCase := range networkResourcesTest {
first := testCase.input[0]
second := testCase.input[1]
require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg)
}
}
func TestNode_Canonicalize(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
// Make sure the eligibility is set properly
node := &Node{}
node.Canonicalize()
require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)
node = &Node{
DrainStrategy: &DrainStrategy{
DrainSpec: DrainSpec{
Deadline: 30000,
},
},
}
node.Canonicalize()
require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
}
func TestNode_Copy(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
node := &Node{
ID: uuid.Generate(),
SecretID: uuid.Generate(),
Datacenter: "dc1",
Name: "foobar",
Attributes: map[string]string{
"kernel.name": "linux",
"arch": "x86",
"nomad.version": "0.5.0",
"driver.exec": "1",
"driver.mock_driver": "1",
},
Resources: &Resources{
CPU: 4000,
MemoryMB: 8192,
DiskMB: 100 * 1024,
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
},
Reserved: &Resources{
CPU: 100,
MemoryMB: 256,
DiskMB: 4 * 1024,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []Port{{Label: "ssh", Value: 22}},
MBits: 1,
},
},
},
NodeResources: &NodeResources{
Cpu: NodeCpuResources{
CpuShares: 4000,
TotalCpuCores: 4,
ReservableCpuCores: []uint16{0, 1, 2, 3},
},
Memory: NodeMemoryResources{
MemoryMB: 8192,
},
Disk: NodeDiskResources{
DiskMB: 100 * 1024,
},
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
},
ReservedResources: &NodeReservedResources{
Cpu: NodeReservedCpuResources{
CpuShares: 100,
ReservedCpuCores: []uint16{0},
},
Memory: NodeReservedMemoryResources{
MemoryMB: 256,
},
Disk: NodeReservedDiskResources{
DiskMB: 4 * 1024,
},
Networks: NodeReservedNetworkResources{
ReservedHostPorts: "22",
},
},
Links: map[string]string{
"consul": "foobar.dc1",
},
Meta: map[string]string{
"pci-dss": "true",
"database": "mysql",
"version": "5.6",
},
NodeClass: "linux-medium-pci",
Status: NodeStatusReady,
SchedulingEligibility: NodeSchedulingEligible,
Drivers: map[string]*DriverInfo{
"mock_driver": {
Attributes: map[string]string{"running": "1"},
Detected: true,
Healthy: true,
HealthDescription: "Currently active",
UpdateTime: time.Now(),
},
},
}
node.ComputeClass()
node2 := node.Copy()
require.Equal(node.Attributes, node2.Attributes)
require.Equal(node.Resources, node2.Resources)
require.Equal(node.Reserved, node2.Reserved)
require.Equal(node.Links, node2.Links)
require.Equal(node.Meta, node2.Meta)
require.Equal(node.Events, node2.Events)
require.Equal(node.DrainStrategy, node2.DrainStrategy)
require.Equal(node.Drivers, node2.Drivers)
}
func TestNode_Sanitize(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
testCases := []*Node{
nil,
{
ID: uuid.Generate(),
SecretID: "",
},
{
ID: uuid.Generate(),
SecretID: uuid.Generate(),
},
}
for _, tc := range testCases {
sanitized := tc.Sanitize()
if tc == nil {
require.Nil(sanitized)
} else {
require.NotNil(sanitized)
require.Empty(sanitized.SecretID)
}
}
}
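// TestSpread_Validate covers Spread.Validate: the attribute is required, the
// weight must be between 0 and 100, target percentages must each be within
// 0-100 and sum to at most 100, and target values may not repeat.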
func TestSpread_Validate(t *testing.T) {
ci.Parallel(t)
type tc struct {
spread *Spread
err error
name string
}
testCases := []tc{
{
spread: &Spread{},
err: fmt.Errorf("Missing spread attribute"),
name: "empty spread",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: -1,
},
err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
name: "Invalid weight",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: 110,
},
err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
name: "Invalid weight",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: 50,
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
Percent: 25,
},
{
Value: "dc2",
Percent: 150,
},
},
},
err: fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
name: "Invalid percentages",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: 50,
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
Percent: 75,
},
{
Value: "dc2",
Percent: 75,
},
},
},
err: fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
name: "Invalid percentages",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: 50,
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
Percent: 25,
},
{
Value: "dc1",
Percent: 50,
},
},
},
err: fmt.Errorf("Spread target value \"dc1\" already defined"),
name: "No spread targets",
},
{
spread: &Spread{
Attribute: "${node.datacenter}",
Weight: 50,
SpreadTarget: []*SpreadTarget{
{
Value: "dc1",
Percent: 25,
},
{
Value: "dc2",
Percent: 50,
},
},
},
err: nil,
name: "Valid spread",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.spread.Validate()
if tc.err != nil {
require.NotNil(t, err)
require.Contains(t, err.Error(), tc.err.Error())
} else {
require.Nil(t, err)
}
})
}
}
func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
cases := []struct {
Input string
Parsed []uint64
Err bool
}{
{
"1,2,3",
[]uint64{1, 2, 3},
false,
},
{
"3,1,2,1,2,3,1-3",
[]uint64{1, 2, 3},
false,
},
{
"3-1",
nil,
true,
},
{
"1-3,2-4",
[]uint64{1, 2, 3, 4},
false,
},
{
"1-3,4,5-5,6,7,8-10",
[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
false,
},
}
for i, tc := range cases {
r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
out, err := r.ParseReservedHostPorts()
if (err != nil) != tc.Err {
t.Fatalf("test case %d: %v", i, err)
}
require.Equal(out, tc.Parsed)
}
}
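// TestMultiregion_CopyCanonicalize checks that copying and canonicalizing a
// Multiregion block produces an equal value with no spurious diff, for both
// empty and populated configurations.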
func TestMultiregion_CopyCanonicalize(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
emptyOld := &Multiregion{}
expected := &Multiregion{
Strategy: &MultiregionStrategy{},
Regions: []*MultiregionRegion{},
}
old := emptyOld.Copy()
old.Canonicalize()
require.Equal(old, expected)
require.False(old.Diff(expected))
nonEmptyOld := &Multiregion{
Strategy: &MultiregionStrategy{
MaxParallel: 2,
OnFailure: "fail_all",
},
Regions: []*MultiregionRegion{
{
Name: "west",
Count: 2,
Datacenters: []string{"west-1", "west-2"},
Meta: map[string]string{},
},
{
Name: "east",
Count: 1,
Datacenters: []string{"east-1"},
Meta: map[string]string{},
},
},
}
old = nonEmptyOld.Copy()
old.Canonicalize()
require.Equal(old, nonEmptyOld)
require.False(old.Diff(nonEmptyOld))
}
func TestNodeResources_Copy(t *testing.T) {
ci.Parallel(t)
orig := &NodeResources{
Cpu: NodeCpuResources{
CpuShares: int64(32000),
TotalCpuCores: 32,
ReservableCpuCores: []uint16{1, 2, 3, 9},
},
Memory: NodeMemoryResources{
MemoryMB: int64(64000),
},
Networks: Networks{
{
Device: "foo",
},
},
NodeNetworks: []*NodeNetworkResource{
{
Mode: "host",
Device: "eth0",
MacAddress: "00:00:00:00:00:00",
Speed: 1000,
Addresses: []NodeNetworkAddress{
{
Family: NodeNetworkAF_IPv4,
Alias: "private",
Address: "192.168.0.100",
ReservedPorts: "22,80",
Gateway: "192.168.0.1",
},
},
},
},
}
kopy := orig.Copy()
assert.Equal(t, orig, kopy)
// Make sure slices aren't shared
kopy.Cpu.ReservableCpuCores[1] = 9000
assert.NotEqual(t, orig.Cpu.ReservableCpuCores, kopy.Cpu.ReservableCpuCores)
kopy.NodeNetworks[0].MacAddress = "11:11:11:11:11:11"
kopy.NodeNetworks[0].Addresses[0].Alias = "public"
assert.NotEqual(t, orig.NodeNetworks[0], kopy.NodeNetworks[0])
}
func TestNodeResources_Merge(t *testing.T) {
ci.Parallel(t)
res := &NodeResources{
Cpu: NodeCpuResources{
CpuShares: int64(32000),
TotalCpuCores: 32,
},
Memory: NodeMemoryResources{
MemoryMB: int64(64000),
},
Networks: Networks{
{
Device: "foo",
},
},
}
res.Merge(&NodeResources{
Cpu: NodeCpuResources{ReservableCpuCores: []uint16{0, 1, 2, 3}},
Memory: NodeMemoryResources{
MemoryMB: int64(100000),
},
Networks: Networks{
{
Mode: "foo/bar",
},
},
})
require.Exactly(t, &NodeResources{
Cpu: NodeCpuResources{
CpuShares: int64(32000),
TotalCpuCores: 32,
ReservableCpuCores: []uint16{0, 1, 2, 3},
},
Memory: NodeMemoryResources{
MemoryMB: int64(100000),
},
Networks: Networks{
{
Device: "foo",
},
{
Mode: "foo/bar",
},
},
}, res)
}
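// TestAllocatedResources_Canonicalize verifies that Canonicalize lifts
// task-level network ports into the shared AllocatedPorts list, appending to
// any ports that are already present.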
func TestAllocatedResources_Canonicalize(t *testing.T) {
ci.Parallel(t)
cases := map[string]struct {
input *AllocatedResources
expected *AllocatedResources
}{
"base": {
input: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"task": {
Networks: Networks{
{
IP: "127.0.0.1",
DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
},
},
},
},
},
expected: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"task": {
Networks: Networks{
{
IP: "127.0.0.1",
DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
},
},
},
},
Shared: AllocatedSharedResources{
Ports: AllocatedPorts{
{
Label: "admin",
Value: 8080,
To: 0,
HostIP: "127.0.0.1",
},
},
},
},
},
"base with existing": {
input: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"task": {
Networks: Networks{
{
IP: "127.0.0.1",
DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
},
},
},
},
Shared: AllocatedSharedResources{
Ports: AllocatedPorts{
{
Label: "http",
Value: 80,
To: 8080,
HostIP: "127.0.0.1",
},
},
},
},
expected: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"task": {
Networks: Networks{
{
IP: "127.0.0.1",
DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
},
},
},
},
Shared: AllocatedSharedResources{
Ports: AllocatedPorts{
{
Label: "http",
Value: 80,
To: 8080,
HostIP: "127.0.0.1",
},
{
Label: "admin",
Value: 8080,
To: 0,
HostIP: "127.0.0.1",
},
},
},
},
},
}
for name, tc := range cases {
tc.input.Canonicalize()
require.Exactly(t, tc.expected, tc.input, "case %s did not match", name)
}
}
func TestAllocatedSharedResources_Canonicalize(t *testing.T) {
ci.Parallel(t)
a := &AllocatedSharedResources{
Networks: []*NetworkResource{
{
IP: "127.0.0.1",
DynamicPorts: []Port{
{
Label: "http",
Value: 22222,
To: 8080,
},
},
ReservedPorts: []Port{
{
Label: "redis",
Value: 6783,
To: 6783,
},
},
},
},
}
a.Canonicalize()
require.Exactly(t, AllocatedPorts{
{
Label: "http",
Value: 22222,
To: 8080,
HostIP: "127.0.0.1",
},
{
Label: "redis",
Value: 6783,
To: 6783,
HostIP: "127.0.0.1",
},
}, a.Ports)
}
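// TestTaskGroup_validateScriptChecksInGroupServices asserts that script checks
// on group services must name a task, either on the service or on the check
// itself.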
func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) {
ci.Parallel(t)
t.Run("service task not set", func(t *testing.T) {
tg := &TaskGroup{
Name: "group1",
Services: []*Service{{
Name: "service1",
TaskName: "", // unset
Checks: []*ServiceCheck{{
Name: "check1",
Type: "script",
TaskName: "", // unset
}, {
Name: "check2",
Type: "ttl", // not script
}, {
Name: "check3",
Type: "script",
TaskName: "", // unset
}},
}, {
Name: "service2",
Checks: []*ServiceCheck{{
Type: "script",
TaskName: "task1", // set
}},
}, {
Name: "service3",
TaskName: "", // unset
Checks: []*ServiceCheck{{
Name: "check1",
Type: "script",
TaskName: "", // unset
}},
}},
}
errStr := tg.validateScriptChecksInGroupServices().Error()
require.Contains(t, errStr, "Service [group1]->service1 or Check check1 must specify task parameter")
require.Contains(t, errStr, "Service [group1]->service1 or Check check3 must specify task parameter")
require.Contains(t, errStr, "Service [group1]->service3 or Check check1 must specify task parameter")
})
t.Run("service task set", func(t *testing.T) {
tgOK := &TaskGroup{
Name: "group1",
Services: []*Service{{
Name: "service1",
TaskName: "task1",
Checks: []*ServiceCheck{{
Name: "check1",
Type: "script",
}, {
Name: "check2",
Type: "ttl",
}, {
Name: "check3",
Type: "script",
}},
}},
}
mErrOK := tgOK.validateScriptChecksInGroupServices()
require.Nil(t, mErrOK)
})
}
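// TestComparableResources_Superset checks Superset across CPU shares and
// reserved cores, expecting the exhausted dimension ("cpu" or "cores") to be
// reported when the candidate does not fit.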
func TestComparableResources_Superset(t *testing.T) {
ci.Parallel(t)
base := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 4000,
ReservedCores: []uint16{0, 1, 2, 3},
},
Memory: AllocatedMemoryResources{MemoryMB: 4096},
},
Shared: AllocatedSharedResources{DiskMB: 10000},
}
cases := []struct {
a *ComparableResources
b *ComparableResources
dimension string
}{
{
a: base,
b: &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{0}},
},
},
},
{
a: base,
b: &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 4000, ReservedCores: []uint16{0, 1, 2, 3}},
},
},
},
{
a: base,
b: &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 5000},
},
},
dimension: "cpu",
},
{
a: base,
b: &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{3, 4}},
},
},
dimension: "cores",
},
}
for _, c := range cases {
fit, dim := c.a.Superset(c.b)
if c.dimension == "" {
require.True(t, fit)
} else {
require.False(t, fit)
require.Equal(t, c.dimension, dim)
}
}
}
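// requireErrors is a test helper that asserts err is a *multierror.Error
// containing every expected substring.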
func requireErrors(t *testing.T, err error, expected ...string) {
t.Helper()
require.Error(t, err)
mErr, ok := err.(*multierror.Error)
require.True(t, ok)
var found []string
for _, e := range expected {
for _, actual := range mErr.Errors {
if strings.Contains(actual.Error(), e) {
found = append(found, e)
break
}
}
}
require.Equal(t, expected, found)
}