package api

import (
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestTaskGroup_NewTaskGroup(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 2)
	expect := &TaskGroup{
		Name:  stringToPtr("grp1"),
		Count: intToPtr(2),
	}
	if !reflect.DeepEqual(grp, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp)
	}
}

func TestTaskGroup_Constrain(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add a constraint to the group
	out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	if n := len(grp.Constraints); n != 1 {
		t.Fatalf("expected 1 constraint, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second constraint
	grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	if !reflect.DeepEqual(grp.Constraints, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Constraints)
	}
}

func TestTaskGroup_AddAffinity(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add an affinity to the group
	out := grp.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	if n := len(grp.Affinities); n != 1 {
		t.Fatalf("expected 1 affinity, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second affinity
	grp.AddAffinity(NewAffinity("${node.affinity}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  int8ToPtr(100),
		},
		{
			LTarget: "${node.affinity}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  int8ToPtr(50),
		},
	}
	if !reflect.DeepEqual(grp.Affinities, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Affinities)
	}
}

func TestTaskGroup_SetMeta(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Initializes an empty map
	out := grp.SetMeta("foo", "bar")
	if grp.Meta == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the group
	if out != grp {
		t.Fatalf("expect: %#v, got: %#v", grp, out)
	}

	// Add a second meta k/v
	grp.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(grp.Meta, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Meta)
	}
}

func TestTaskGroup_AddSpread(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Create and add spread
	spreadTarget := NewSpreadTarget("r1", 50)
	spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})

	out := grp.AddSpread(spread)
	if n := len(grp.Spreads); n != 1 {
		t.Fatalf("expected 1 spread, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second spread
	spreadTarget2 := NewSpreadTarget("dc1", 100)
	spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})

	grp.AddSpread(spread2)

	expect := []*Spread{
		{
			Attribute: "${meta.rack}",
			Weight:    int8ToPtr(100),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
			},
		},
		{
			Attribute: "${node.datacenter}",
			Weight:    int8ToPtr(100),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "dc1",
					Percent: 100,
				},
			},
		},
	}
	if !reflect.DeepEqual(grp.Spreads, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Spreads)
	}
}

func TestTaskGroup_AddTask(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add the task to the task group
	out := grp.AddTask(NewTask("task1", "java"))
	if n := len(grp.Tasks); n != 1 {
		t.Fatalf("expected 1 task, got: %d", n)
	}

	// Check that we returned the group
	if out != grp {
		t.Fatalf("expect: %#v, got: %#v", grp, out)
	}

	// Add a second task
	grp.AddTask(NewTask("task2", "exec"))
	expect := []*Task{
		{
			Name:   "task1",
			Driver: "java",
		},
		{
			Name:   "task2",
			Driver: "exec",
		},
	}
	if !reflect.DeepEqual(grp.Tasks, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Tasks)
	}
}

func TestTask_NewTask(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")
	expect := &Task{
		Name:   "task1",
		Driver: "exec",
	}
	if !reflect.DeepEqual(task, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task)
	}
}

func TestTask_SetConfig(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetConfig("foo", "bar")
	if task.Config == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}

	// Set another config value
	task.SetConfig("baz", "zip")
	expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(task.Config, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Config)
	}
}

func TestTask_SetMeta(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetMeta("foo", "bar")
	if task.Meta == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}

	// Set another meta k/v
	task.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(task.Meta, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Meta)
	}
}

func TestTask_Require(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Create some require resources
	resources := &Resources{
		CPU:      intToPtr(1250),
		MemoryMB: intToPtr(128),
		DiskMB:   intToPtr(2048),
		Networks: []*NetworkResource{
			{
				CIDR:          "0.0.0.0/0",
				MBits:         intToPtr(100),
				ReservedPorts: []Port{{"", 80, 0}, {"", 443, 0}},
			},
		},
	}
	out := task.Require(resources)
	if !reflect.DeepEqual(task.Resources, resources) {
		t.Fatalf("expect: %#v, got: %#v", resources, task.Resources)
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}
}

func TestTask_Constrain(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Add a constraint to the task
	out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	if n := len(task.Constraints); n != 1 {
		t.Fatalf("expected 1 constraint, got: %d", n)
	}

	// Check that the task was returned
	if out != task {
		t.Fatalf("expected: %#v, got: %#v", task, out)
	}

	// Add a second constraint
	task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	if !reflect.DeepEqual(task.Constraints, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Constraints)
	}
}

func TestTask_AddAffinity(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Add an affinity to the task
	out := task.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	require := require.New(t)
	require.Len(out.Affinities, 1)

	// Check that the task was returned
	if out != task {
		t.Fatalf("expected: %#v, got: %#v", task, out)
	}

	// Add a second affinity
	task.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  int8ToPtr(100),
		},
		{
			LTarget: "${node.datacenter}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  int8ToPtr(50),
		},
	}
	if !reflect.DeepEqual(task.Affinities, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Affinities)
	}
}

func TestTask_Artifact(t *testing.T) {
	t.Parallel()
	a := TaskArtifact{
		GetterSource: stringToPtr("http://localhost/foo.txt"),
		GetterMode:   stringToPtr("file"),
	}
	a.Canonicalize()
	if *a.GetterMode != "file" {
		t.Errorf("expected file but found %q", *a.GetterMode)
	}
	if filepath.ToSlash(*a.RelativeDest) != "local/foo.txt" {
		t.Errorf("expected local/foo.txt but found %q", *a.RelativeDest)
	}
}

// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
	// Job with an Empty() Update
	job := &Job{
		ID: stringToPtr("test"),
		Update: &UpdateStrategy{
			AutoRevert:       boolToPtr(false),
			AutoPromote:      boolToPtr(false),
			Canary:           intToPtr(0),
			HealthCheck:      stringToPtr(""),
			HealthyDeadline:  timeToPtr(0),
			ProgressDeadline: timeToPtr(0),
			MaxParallel:      intToPtr(0),
			MinHealthyTime:   timeToPtr(0),
			Stagger:          timeToPtr(0),
		},
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: stringToPtr("foo"),
	}
	tg.Canonicalize(job)
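	// The empty job-level update stanza should not be inherited by the group:
	// job.Update stays set after canonicalization while tg.Update remains nil.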
	assert.NotNil(t, job.Update)
	assert.Nil(t, tg.Update)
}

func TestTaskGroup_Merge_Update(t *testing.T) {
	job := &Job{
		ID:     stringToPtr("test"),
		Update: &UpdateStrategy{},
	}
	job.Canonicalize()

	// Merge and canonicalize part of an update stanza
	tg := &TaskGroup{
		Name: stringToPtr("foo"),
		Update: &UpdateStrategy{
			AutoRevert:  boolToPtr(true),
			Canary:      intToPtr(5),
			HealthCheck: stringToPtr("foo"),
		},
	}

	tg.Canonicalize(job)
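	// Fields set on the group's update stanza are preserved; unset fields are
	// filled in from the canonicalized job-level update.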
	require.Equal(t, &UpdateStrategy{
		AutoRevert:       boolToPtr(true),
		AutoPromote:      boolToPtr(false),
		Canary:           intToPtr(5),
		HealthCheck:      stringToPtr("foo"),
		HealthyDeadline:  timeToPtr(5 * time.Minute),
		ProgressDeadline: timeToPtr(10 * time.Minute),
		MaxParallel:      intToPtr(1),
		MinHealthyTime:   timeToPtr(10 * time.Second),
		Stagger:          timeToPtr(30 * time.Second),
	}, tg.Update)
}

// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
	type testCase struct {
		desc        string
		jobType     string
		jobMigrate  *MigrateStrategy
		taskMigrate *MigrateStrategy
		expected    *MigrateStrategy
	}

	testCases := []testCase{
		{
			desc:        "Default batch",
			jobType:     "batch",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected:    nil,
		},
		{
			desc:        "Default service",
			jobType:     "service",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(1),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(10 * time.Second),
				HealthyDeadline: timeToPtr(5 * time.Minute),
			},
		},
		{
			desc:    "Empty job migrate strategy",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     intToPtr(0),
				HealthCheck:     stringToPtr(""),
				MinHealthyTime:  timeToPtr(0),
				HealthyDeadline: timeToPtr(0),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(0),
				HealthCheck:     stringToPtr(""),
				MinHealthyTime:  timeToPtr(0),
				HealthyDeadline: timeToPtr(0),
			},
		},
		{
			desc:    "Inherit from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     intToPtr(3),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(3),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
		},
		{
			desc:       "Set in task",
			jobType:    "service",
			jobMigrate: nil,
			taskMigrate: &MigrateStrategy{
				MaxParallel:     intToPtr(3),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(3),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
		},
		{
			desc:    "Merge from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: intToPtr(11),
			},
			taskMigrate: &MigrateStrategy{
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(11),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
		},
		{
			desc:    "Override from group",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: intToPtr(11),
			},
			taskMigrate: &MigrateStrategy{
				MaxParallel:     intToPtr(5),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(5),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(2),
				HealthyDeadline: timeToPtr(2),
			},
		},
		{
			desc:    "Parallel from job, defaulting",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: intToPtr(5),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     intToPtr(5),
				HealthCheck:     stringToPtr("checks"),
				MinHealthyTime:  timeToPtr(10 * time.Second),
				HealthyDeadline: timeToPtr(5 * time.Minute),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := &Job{
				ID:      stringToPtr("test"),
				Migrate: tc.jobMigrate,
				Type:    stringToPtr(tc.jobType),
			}
			job.Canonicalize()
			tg := &TaskGroup{
				Name:    stringToPtr("foo"),
				Migrate: tc.taskMigrate,
			}
			tg.Canonicalize(job)
			assert.Equal(t, tc.expected, tg.Migrate)
		})
	}
}

// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
	job := &Job{Name: stringToPtr("job")}
	tg := &TaskGroup{Name: stringToPtr("group")}
	task := &Task{Name: "task"}
	service := &Service{
		CheckRestart: &CheckRestart{
			Limit:          11,
			Grace:          timeToPtr(11 * time.Second),
			IgnoreWarnings: true,
		},
		Checks: []ServiceCheck{
			{
				Name: "all-set",
				CheckRestart: &CheckRestart{
					Limit:          22,
					Grace:          timeToPtr(22 * time.Second),
					IgnoreWarnings: true,
				},
			},
			{
				Name: "some-set",
				CheckRestart: &CheckRestart{
					Limit: 33,
					Grace: timeToPtr(33 * time.Second),
				},
			},
			{
				Name: "unset",
			},
		},
	}
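
	// Canonicalize should merge the service-level CheckRestart into each check:
	// "all-set" keeps its own values, "some-set" keeps its Limit and Grace but
	// inherits IgnoreWarnings, and "unset" inherits everything.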
	service.Canonicalize(task, tg, job)
	assert.Equal(t, service.Checks[0].CheckRestart.Limit, 22)
	assert.Equal(t, *service.Checks[0].CheckRestart.Grace, 22*time.Second)
	assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)

	assert.Equal(t, service.Checks[1].CheckRestart.Limit, 33)
	assert.Equal(t, *service.Checks[1].CheckRestart.Grace, 33*time.Second)
	assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)

	assert.Equal(t, service.Checks[2].CheckRestart.Limit, 11)
	assert.Equal(t, *service.Checks[2].CheckRestart.Grace, 11*time.Second)
	assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}

// TestSpread_Canonicalize asserts that the spread stanza is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
	job := &Job{
		ID:   stringToPtr("test"),
		Type: stringToPtr("batch"),
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: stringToPtr("foo"),
	}
	type testCase struct {
		desc           string
		spread         *Spread
		expectedWeight int8
	}
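	// A nil spread weight should default to 50 during canonicalization, while
	// explicitly set weights, including zero, are preserved.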
	cases := []testCase{
		{
			"Nil spread",
			&Spread{
				Attribute: "test",
				Weight:    nil,
			},
			50,
		},
		{
			"Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    int8ToPtr(0),
			},
			0,
		},
		{
			"Non Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    int8ToPtr(100),
			},
			100,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			require := require.New(t)
			tg.Spreads = []*Spread{tc.spread}
			tg.Canonicalize(job)
			for _, spr := range tg.Spreads {
				require.Equal(tc.expectedWeight, *spr.Weight)
			}
		})
	}
}