// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
	"path/filepath"
	"testing"
	"time"

	"github.com/hashicorp/nomad/api/internal/testutil"
	"github.com/shoenig/test/must"
)

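// TestTaskGroup_NewTaskGroup verifies that NewTaskGroup sets the group's name
// and count.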
func TestTaskGroup_NewTaskGroup(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 2)
	expect := &TaskGroup{
		Name:  pointerOf("grp1"),
		Count: pointerOf(2),
	}
	must.Eq(t, expect, grp)
}

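// TestTaskGroup_Constrain verifies that Constrain appends constraints in order
// and returns the group so calls can be chained.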
func TestTaskGroup_Constrain(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add a constraint to the group
	out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	must.Len(t, 1, grp.Constraints)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second constraint
	grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	must.Eq(t, expect, grp.Constraints)
}

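// TestTaskGroup_AddAffinity verifies that AddAffinity appends affinities,
// including their weights, and returns the group so calls can be chained.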
func TestTaskGroup_AddAffinity(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add an affinity to the group
	out := grp.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	must.Len(t, 1, grp.Affinities)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second affinity
	grp.AddAffinity(NewAffinity("${node.affinity}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  pointerOf(int8(100)),
		},
		{
			LTarget: "${node.affinity}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  pointerOf(int8(50)),
		},
	}
	must.Eq(t, expect, grp.Affinities)
}

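// TestTaskGroup_SetMeta verifies that SetMeta lazily initializes the Meta map
// and merges subsequent key/value pairs into it.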
func TestTaskGroup_SetMeta(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Initializes an empty map
	out := grp.SetMeta("foo", "bar")
	must.NotNil(t, grp.Meta)

	// Check that we returned the group
	must.Eq(t, grp, out)

	// Add a second meta k/v
	grp.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, grp.Meta)
}

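// TestTaskGroup_AddSpread verifies that AddSpread appends spread blocks with
// their targets and returns the group so calls can be chained.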
func TestTaskGroup_AddSpread(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Create and add spread
	spreadTarget := NewSpreadTarget("r1", 50)
	spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})

	out := grp.AddSpread(spread)
	must.Len(t, 1, grp.Spreads)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second spread
	spreadTarget2 := NewSpreadTarget("dc1", 100)
	spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})

	grp.AddSpread(spread2)

	expect := []*Spread{
		{
			Attribute: "${meta.rack}",
			Weight:    pointerOf(int8(100)),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
			},
		},
		{
			Attribute: "${node.datacenter}",
			Weight:    pointerOf(int8(100)),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "dc1",
					Percent: 100,
				},
			},
		},
	}
	must.Eq(t, expect, grp.Spreads)
}

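// TestTaskGroup_AddTask verifies that AddTask appends tasks in order and
// returns the group so calls can be chained.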
func TestTaskGroup_AddTask(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add the task to the task group
	out := grp.AddTask(NewTask("task1", "java"))
	must.Len(t, 1, out.Tasks)

	// Check that we returned the group
	must.Eq(t, grp, out)

	// Add a second task
	grp.AddTask(NewTask("task2", "exec"))
	expect := []*Task{
		{
			Name:   "task1",
			Driver: "java",
		},
		{
			Name:   "task2",
			Driver: "exec",
		},
	}
	must.Eq(t, expect, grp.Tasks)
}

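// TestTask_NewTask verifies that NewTask sets the task's name and driver.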
func TestTask_NewTask(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")
	expect := &Task{
		Name:   "task1",
		Driver: "exec",
	}
	must.Eq(t, expect, task)
}

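// TestTask_SetConfig verifies that SetConfig lazily initializes the Config map
// and merges subsequent keys into it.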
func TestTask_SetConfig(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetConfig("foo", "bar")
	must.NotNil(t, task.Config)

	// Check that we returned the task
	must.Eq(t, task, out)

	// Set another config value
	task.SetConfig("baz", "zip")
	expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, task.Config)
}

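// TestTask_SetMeta verifies that SetMeta lazily initializes the Meta map and
// merges subsequent keys into it.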
func TestTask_SetMeta(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetMeta("foo", "bar")
	must.NotNil(t, out)

	// Check that we returned the task
	must.Eq(t, task, out)

	// Set another meta k/v
	task.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, task.Meta)
}

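// TestTask_Require verifies that Require attaches the given resources to the
// task and returns the task so calls can be chained.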
func TestTask_Require(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Create some require resources
	resources := &Resources{
		CPU:      pointerOf(1250),
		MemoryMB: pointerOf(128),
		DiskMB:   pointerOf(2048),
		Networks: []*NetworkResource{
			{
				CIDR:          "0.0.0.0/0",
				MBits:         pointerOf(100),
				ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}},
			},
		},
	}
	out := task.Require(resources)
	must.Eq(t, resources, task.Resources)

	// Check that we returned the task
	must.Eq(t, task, out)
}

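// TestTask_Constrain verifies that Constrain appends constraints in order and
// returns the task so calls can be chained.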
func TestTask_Constrain(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Add a constraint to the task
	out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	must.Len(t, 1, task.Constraints)

	// Check that the task was returned
	must.Eq(t, task, out)

	// Add a second constraint
	task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	must.Eq(t, expect, task.Constraints)
}

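// TestTask_AddAffinity verifies that AddAffinity appends affinities with their
// weights and returns the task so calls can be chained.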
func TestTask_AddAffinity(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Add an affinity to the task
	out := task.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	must.Len(t, 1, out.Affinities)

	// Check that the task was returned
	must.Eq(t, task, out)

	// Add a second affinity
	task.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  pointerOf(int8(100)),
		},
		{
			LTarget: "${node.datacenter}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  pointerOf(int8(50)),
		},
	}
	must.Eq(t, expect, task.Affinities)
}

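// TestTask_Artifact verifies that TaskArtifact.Canonicalize derives the
// default relative destination from the artifact source's filename and nils
// out empty getter header and option maps.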
func TestTask_Artifact(t *testing.T) {
	testutil.Parallel(t)

	a := TaskArtifact{
		GetterSource:  pointerOf("http://localhost/foo.txt"),
		GetterMode:    pointerOf("file"),
		GetterHeaders: make(map[string]string),
		GetterOptions: make(map[string]string),
	}
	a.Canonicalize()
	must.Eq(t, "file", *a.GetterMode)
	must.Eq(t, "local/foo.txt", filepath.ToSlash(*a.RelativeDest))
	must.Nil(t, a.GetterOptions)
	must.Nil(t, a.GetterHeaders)
}

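// TestTask_VolumeMount verifies that VolumeMount.Canonicalize defaults the
// mount propagation mode of an empty volume mount to "private".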
func TestTask_VolumeMount(t *testing.T) {
	testutil.Parallel(t)

	vm := new(VolumeMount)
	vm.Canonicalize()
	must.NotNil(t, vm.PropagationMode)
	must.Eq(t, "private", *vm.PropagationMode)
}

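// TestTask_Canonicalize_TaskLifecycle verifies that an empty lifecycle block
// is canonicalized to nil.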
func TestTask_Canonicalize_TaskLifecycle(t *testing.T) {
	testutil.Parallel(t)

	testCases := []struct {
		name     string
		expected *TaskLifecycle
		task     *Task
	}{
		{
			name: "empty",
			task: &Task{
				Lifecycle: &TaskLifecycle{},
			},
			expected: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tg := &TaskGroup{
				Name: pointerOf("foo"),
			}
			j := &Job{
				ID: pointerOf("test"),
			}
			tc.task.Canonicalize(tg, j)
			must.Eq(t, tc.expected, tc.task.Lifecycle)
		})
	}
}

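// TestTask_Template_WaitConfig_Canonicalize_and_Copy verifies that a
// template's wait block preserves its Min/Max bounds through both Copy and
// Canonicalize.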
func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) {
	testutil.Parallel(t)

	taskWithWait := func(wc *WaitConfig) *Task {
		return &Task{
			Templates: []*Template{
				{
					Wait: wc,
				},
			},
		}
	}

	testCases := []struct {
		name          string
		canonicalized *WaitConfig
		copied        *WaitConfig
		task          *Task
	}{
		{
			name: "all-fields",
			task: taskWithWait(&WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			}),
			canonicalized: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			},
			copied: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			},
		},
		{
			name: "no-fields",
			task: taskWithWait(&WaitConfig{}),
			canonicalized: &WaitConfig{
				Min: nil,
				Max: nil,
			},
			copied: &WaitConfig{
				Min: nil,
				Max: nil,
			},
		},
		{
			name: "min-only",
			task: taskWithWait(&WaitConfig{
				Min: pointerOf(time.Duration(5)),
			}),
			canonicalized: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
			},
			copied: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
			},
		},
		{
			name: "max-only",
			task: taskWithWait(&WaitConfig{
				Max: pointerOf(time.Duration(10)),
			}),
			canonicalized: &WaitConfig{
				Max: pointerOf(time.Duration(10)),
			},
			copied: &WaitConfig{
				Max: pointerOf(time.Duration(10)),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tg := &TaskGroup{
				Name: pointerOf("foo"),
			}
			j := &Job{
				ID: pointerOf("test"),
			}
			must.Eq(t, tc.copied, tc.task.Templates[0].Wait.Copy())
			tc.task.Canonicalize(tg, j)
			must.Eq(t, tc.canonicalized, tc.task.Templates[0].Wait)
		})
	}
}

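// TestTask_Canonicalize_Vault verifies the defaults applied to an empty vault
// block (Env, DisableFile, Namespace, ChangeMode, and ChangeSignal).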
func TestTask_Canonicalize_Vault(t *testing.T) {
	testCases := []struct {
		name     string
		input    *Vault
		expected *Vault
	}{
		{
			name:  "empty",
			input: &Vault{},
			expected: &Vault{
				Env:          pointerOf(true),
				DisableFile:  pointerOf(false),
				Namespace:    pointerOf(""),
				ChangeMode:   pointerOf("restart"),
				ChangeSignal: pointerOf("SIGHUP"),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.input.Canonicalize()
			must.Eq(t, tc.expected, tc.input)
		})
	}
}

// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
	testutil.Parallel(t)

	// Job with an Empty() Update
	job := &Job{
		ID: pointerOf("test"),
		Update: &UpdateStrategy{
			AutoRevert:       pointerOf(false),
			AutoPromote:      pointerOf(false),
			Canary:           pointerOf(0),
			HealthCheck:      pointerOf(""),
			HealthyDeadline:  pointerOf(time.Duration(0)),
			ProgressDeadline: pointerOf(time.Duration(0)),
			MaxParallel:      pointerOf(0),
			MinHealthyTime:   pointerOf(time.Duration(0)),
			Stagger:          pointerOf(time.Duration(0)),
		},
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: pointerOf("foo"),
	}
	tg.Canonicalize(job)
	must.NotNil(t, job.Update)
	must.Nil(t, tg.Update)
}

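// TestTaskGroup_Canonicalize_Scaling verifies the mutual defaulting of Count
// and Scaling.Min during canonicalization: a nil value inherits from the
// other, both fall back to 1 when neither is set, and explicit values are
// preserved.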
func TestTaskGroup_Canonicalize_Scaling(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID: pointerOf("test"),
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name:  pointerOf("foo"),
		Count: nil,
		Scaling: &ScalingPolicy{
			Min:         nil,
			Max:         pointerOf(int64(10)),
			Policy:      nil,
			Enabled:     nil,
			CreateIndex: 0,
			ModifyIndex: 0,
		},
	}
	job.TaskGroups = []*TaskGroup{tg}

	// both nil => both == 1
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 1, *tg.Count)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// count == nil => count = Scaling.Min
	tg.Count = nil
	tg.Scaling.Min = pointerOf(int64(5))
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 5, *tg.Count)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// Scaling.Min == nil => Scaling.Min == count
	tg.Count = pointerOf(5)
	tg.Scaling.Min = nil
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 5, *tg.Scaling.Min)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// both present, both persisted
	tg.Count = pointerOf(5)
	tg.Scaling.Min = pointerOf(int64(1))
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 1, *tg.Scaling.Min)
	must.Eq(t, 5, *tg.Count)
}

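// TestTaskGroup_Merge_Update verifies that a partial group-level update block
// is merged with the canonical job-level defaults during canonicalization.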
func TestTaskGroup_Merge_Update(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID:     pointerOf("test"),
		Update: &UpdateStrategy{},
	}
	job.Canonicalize()

	// Merge and canonicalize part of an update block
	tg := &TaskGroup{
		Name: pointerOf("foo"),
		Update: &UpdateStrategy{
			AutoRevert:  pointerOf(true),
			Canary:      pointerOf(5),
			HealthCheck: pointerOf("foo"),
		},
	}

	tg.Canonicalize(job)
	must.Eq(t, &UpdateStrategy{
		AutoRevert:       pointerOf(true),
		AutoPromote:      pointerOf(false),
		Canary:           pointerOf(5),
		HealthCheck:      pointerOf("foo"),
		HealthyDeadline:  pointerOf(5 * time.Minute),
		ProgressDeadline: pointerOf(10 * time.Minute),
		MaxParallel:      pointerOf(1),
		MinHealthyTime:   pointerOf(10 * time.Second),
		Stagger:          pointerOf(30 * time.Second),
	}, tg.Update)
}

// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
	testutil.Parallel(t)

	type testCase struct {
		desc        string
		jobType     string
		jobMigrate  *MigrateStrategy
		taskMigrate *MigrateStrategy
		expected    *MigrateStrategy
	}

	testCases := []testCase{
		{
			desc:        "Default batch",
			jobType:     "batch",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected:    nil,
		},
		{
			desc:        "Default service",
			jobType:     "service",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(1),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(10 * time.Second),
				HealthyDeadline: pointerOf(5 * time.Minute),
			},
		},
		{
			desc:    "Empty job migrate strategy",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(0),
				HealthCheck:     pointerOf(""),
				MinHealthyTime:  pointerOf(time.Duration(0)),
				HealthyDeadline: pointerOf(time.Duration(0)),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(0),
				HealthCheck:     pointerOf(""),
				MinHealthyTime:  pointerOf(time.Duration(0)),
				HealthyDeadline: pointerOf(time.Duration(0)),
			},
		},
		{
			desc:    "Inherit from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:       "Set in task",
			jobType:    "service",
			jobMigrate: nil,
			taskMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Merge from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(11),
			},
			taskMigrate: &MigrateStrategy{
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(11),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Override from group",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(11),
			},
			taskMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Parallel from job, defaulting",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(5),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(10 * time.Second),
				HealthyDeadline: pointerOf(5 * time.Minute),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := &Job{
				ID:      pointerOf("test"),
				Migrate: tc.jobMigrate,
				Type:    pointerOf(tc.jobType),
			}
			job.Canonicalize()
			tg := &TaskGroup{
				Name:    pointerOf("foo"),
				Migrate: tc.taskMigrate,
			}
			tg.Canonicalize(job)
			must.Eq(t, tc.expected, tg.Migrate)
		})
	}
}

// TestSpread_Canonicalize asserts that the spread block is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID:   pointerOf("test"),
		Type: pointerOf("batch"),
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: pointerOf("foo"),
	}
	type testCase struct {
		desc           string
		spread         *Spread
		expectedWeight int8
	}
	cases := []testCase{
		{
			"Nil spread",
			&Spread{
				Attribute: "test",
				Weight:    nil,
			},
			50,
		},
		{
			"Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    pointerOf(int8(0)),
			},
			0,
		},
		{
			"Non Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    pointerOf(int8(100)),
			},
			100,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			tg.Spreads = []*Spread{tc.spread}
			tg.Canonicalize(job)
			for _, spr := range tg.Spreads {
				must.Eq(t, tc.expectedWeight, *spr.Weight)
			}
		})
	}
}

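// Test_NewDefaultReschedulePolicy verifies the per-job-type default reschedule
// policies: unlimited exponential delays for service jobs, a single
// constant-delay attempt for batch jobs, and a zero policy otherwise.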
func Test_NewDefaultReschedulePolicy(t *testing.T) {
	testutil.Parallel(t)

	testCases := []struct {
		desc         string
		inputJobType string
		expected     *ReschedulePolicy
	}{
		{
			desc:         "service job type",
			inputJobType: "service",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(30 * time.Second),
				DelayFunction: pointerOf("exponential"),
				MaxDelay:      pointerOf(1 * time.Hour),
				Unlimited:     pointerOf(true),
			},
		},
		{
			desc:         "batch job type",
			inputJobType: "batch",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(1),
				Interval:      pointerOf(24 * time.Hour),
				Delay:         pointerOf(5 * time.Second),
				DelayFunction: pointerOf("constant"),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
		{
			desc:         "system job type",
			inputJobType: "system",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(time.Duration(0)),
				DelayFunction: pointerOf(""),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
		{
			desc:         "unrecognised job type",
			inputJobType: "unrecognised",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(time.Duration(0)),
				DelayFunction: pointerOf(""),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			actual := NewDefaultReschedulePolicy(tc.inputJobType)
			must.Eq(t, tc.expected, actual)
		})
	}
}

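// TestTaskGroup_Canonicalize_Consul verifies that a group-level consul
// namespace overrides the job-level one, and is inherited from the job when
// unset on the group.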
func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
	testutil.Parallel(t)

	t.Run("override job consul in group", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: pointerOf("ns1"),
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: &Consul{Namespace: "ns2"},
		}
		tg.Canonicalize(job)

		must.Eq(t, "ns1", *job.ConsulNamespace)
		must.Eq(t, "ns2", tg.Consul.Namespace)
	})

	t.Run("inherit job consul in group", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: pointerOf("ns1"),
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: nil, // not set, inherit from job
		}
		tg.Canonicalize(job)

		must.Eq(t, "ns1", *job.ConsulNamespace)
		must.Eq(t, "ns1", tg.Consul.Namespace)
	})

	t.Run("set in group only", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: nil,
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: &Consul{Namespace: "ns2"},
		}
		tg.Canonicalize(job)

		must.Eq(t, "", *job.ConsulNamespace)
		must.Eq(t, "ns2", tg.Consul.Namespace)
	})
}