// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package scheduler

import (
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/require"
)

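// BenchmarkTasksUpdated measures the cost of running tasksUpdated over two
// identical copies of a large benchmark job; the comparison should report no
// modification.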
func BenchmarkTasksUpdated(b *testing.B) {
	jobA := mock.BigBenchmarkJob()
	jobB := jobA.Copy()
	for n := 0; n < b.N; n++ {
		if c := tasksUpdated(jobA, jobB, jobA.TaskGroups[0].Name); c.modified {
			b.Errorf("tasks should be the same")
		}
	}
}

func newNode(name string) *structs.Node {
	n := mock.Node()
	n.Name = name
	return n
}

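// TestReadyNodesInDCsAndPool checks that readyNodesInDCsAndPool returns only
// ready nodes in the requested datacenters (including "dc*" wildcards) and
// node pool, reports down or draining nodes as not ready, and counts the
// ready nodes per datacenter.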
func TestReadyNodesInDCsAndPool(t *testing.T) {
	ci.Parallel(t)

	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.DrainNode()
	node5 := mock.Node()
	node5.Datacenter = "not-this-dc"
	node6 := mock.Node()
	node6.Datacenter = "dc1"
	node6.NodePool = "other"
	node7 := mock.Node()
	node7.Datacenter = "dc2"
	node7.NodePool = "other"
	node8 := mock.Node()
	node8.Datacenter = "dc1"
	node8.NodePool = "other"
	node8.Status = structs.NodeStatusDown
	node9 := mock.DrainNode()
	node9.Datacenter = "dc2"
	node9.NodePool = "other"

	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1)) // dc1 ready
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2)) // dc2 ready
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3)) // dc2 not ready
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4)) // dc2 not ready
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1004, node5)) // ready never match
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1005, node6)) // dc1 other pool
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1006, node7)) // dc2 other pool
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1007, node8)) // dc1 other not ready
	must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1008, node9)) // dc2 other not ready

	testCases := []struct {
		name           string
		datacenters    []string
		pool           string
		expectReady    []*structs.Node
		expectNotReady map[string]struct{}
		expectIndex    map[string]int
	}{
		{
			name:        "no wildcards in all pool",
			datacenters: []string{"dc1", "dc2"},
			pool:        structs.NodePoolAll,
			expectReady: []*structs.Node{node1, node2, node6, node7},
			expectNotReady: map[string]struct{}{
				node3.ID: {}, node4.ID: {}, node8.ID: {}, node9.ID: {}},
			expectIndex: map[string]int{"dc1": 2, "dc2": 2},
		},
		{
			name:        "with wildcard in all pool",
			datacenters: []string{"dc*"},
			pool:        structs.NodePoolAll,
			expectReady: []*structs.Node{node1, node2, node6, node7},
			expectNotReady: map[string]struct{}{
				node3.ID: {}, node4.ID: {}, node8.ID: {}, node9.ID: {}},
			expectIndex: map[string]int{"dc1": 2, "dc2": 2},
		},
		{
			name:           "no wildcards in default pool",
			datacenters:    []string{"dc1", "dc2"},
			pool:           structs.NodePoolDefault,
			expectReady:    []*structs.Node{node1, node2},
			expectNotReady: map[string]struct{}{node3.ID: {}, node4.ID: {}},
			expectIndex:    map[string]int{"dc1": 1, "dc2": 1},
		},
		{
			name:           "with wildcard in default pool",
			datacenters:    []string{"dc*"},
			pool:           structs.NodePoolDefault,
			expectReady:    []*structs.Node{node1, node2},
			expectNotReady: map[string]struct{}{node3.ID: {}, node4.ID: {}},
			expectIndex:    map[string]int{"dc1": 1, "dc2": 1},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ready, notReady, dcIndex, err := readyNodesInDCsAndPool(state, tc.datacenters, tc.pool)
			must.NoError(t, err)
			must.SliceContainsAll(t, tc.expectReady, ready, must.Sprint("expected ready to match"))
			must.Eq(t, tc.expectNotReady, notReady, must.Sprint("expected not-ready to match"))
			must.Eq(t, tc.expectIndex, dcIndex, must.Sprint("expected datacenter counts to match"))
		})
	}
}

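// TestRetryMax verifies that retryMax gives up after the configured number of
// attempts, that a reset function returning true grants another round of
// attempts, and that a successful callback stops the retries immediately.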
func TestRetryMax(t *testing.T) {
	ci.Parallel(t)

	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	require.Error(t, err)
	require.Equal(t, 3, calls, "mismatch")

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	require.Error(t, err)
	require.Equal(t, 6, calls, "mismatch")

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	require.NoError(t, err)
	require.Equal(t, 1, calls, "mismatch")
}

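// TestTaintedNodes verifies that taintedNodes reports allocations placed on
// down, draining, or unknown nodes as tainted while ignoring ready nodes.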
func TestTaintedNodes(t *testing.T) {
	ci.Parallel(t)

	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.DrainNode()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1002, node3))
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1003, node4))

	allocs := []*structs.Allocation{
		{NodeID: node1.ID},
		{NodeID: node2.ID},
		{NodeID: node3.ID},
		{NodeID: node4.ID},
		{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	require.NoError(t, err)
	require.Equal(t, 3, len(tainted))
	require.NotContains(t, tainted, node1.ID)
	require.NotContains(t, tainted, node2.ID)

	require.Contains(t, tainted, node3.ID)
	require.NotNil(t, tainted[node3.ID])

	require.Contains(t, tainted, node4.ID)
	require.NotNil(t, tainted[node4.ID])

	require.Contains(t, tainted, "12345678-abcd-efab-cdef-123456789abc")
	require.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"])
}

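// TestShuffleNodes verifies that shuffleNodes changes the node order and that
// the shuffle is deterministic for the same plan and index.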
func TestShuffleNodes(t *testing.T) {
	ci.Parallel(t)

	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	eval := mock.Eval() // will have random EvalID
	plan := eval.MakePlan(mock.Job())
	shuffleNodes(plan, 1000, nodes)
	require.False(t, reflect.DeepEqual(nodes, orig))

	nodes2 := make([]*structs.Node, len(nodes))
	copy(nodes2, orig)
	shuffleNodes(plan, 1000, nodes2)

	require.True(t, reflect.DeepEqual(nodes, nodes2))
}

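// TestTaskUpdatedAffinity verifies that adding affinities at the task group
// or task level is detected by tasksUpdated, while expressing the same
// affinity at a different level is not treated as a change.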
func TestTaskUpdatedAffinity(t *testing.T) {
	ci.Parallel(t)

	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name
	must.False(t, tasksUpdated(j1, j2, name).modified)

	// TaskGroup Affinity
	j2.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	must.True(t, tasksUpdated(j1, j2, name).modified)

	// TaskGroup Task Affinity
	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	must.True(t, tasksUpdated(j1, j3, name).modified)

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	must.True(t, tasksUpdated(j1, j4, name).modified)

	// check different level of same affinity
	j5 := mock.Job()
	j5.Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	j6 := mock.Job()
	j6.Affinities = make([]*structs.Affinity, 0)
	j6.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	must.False(t, tasksUpdated(j5, j6, name).modified)
}

func TestTaskUpdatedSpread(t *testing.T) {
	ci.Parallel(t)

	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	must.False(t, tasksUpdated(j1, j2, name).modified)

	// TaskGroup Spread
	j2.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}
	must.True(t, tasksUpdated(j1, j2, name).modified)

	// check different level of same constraint
	j5 := mock.Job()
	j5.Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	must.False(t, tasksUpdated(j5, j6, name).modified)
}

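// TestTasksUpdated walks through a series of job mutations (config, ports,
// env, artifacts, meta, resources, templates, volumes, CSI plugins, and more)
// and asserts which of them tasksUpdated reports as a modification.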
func TestTasksUpdated(t *testing.T) {
	ci.Parallel(t)

	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name
	must.False(t, tasksUpdated(j1, j2, name).modified)

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	must.True(t, tasksUpdated(j1, j2, name).modified)

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	must.True(t, tasksUpdated(j1, j3, name).modified)

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	must.True(t, tasksUpdated(j1, j4, name).modified)

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	must.True(t, tasksUpdated(j1, j5, name).modified)

	j6 := mock.Job()
	j6.TaskGroups[0].Networks[0].DynamicPorts = []structs.Port{
		{Label: "http", Value: 0},
		{Label: "https", Value: 0},
		{Label: "admin", Value: 0},
	}
	must.True(t, tasksUpdated(j1, j6, name).modified)

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	must.True(t, tasksUpdated(j1, j7, name).modified)

	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	must.True(t, tasksUpdated(j1, j8, name).modified)

	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	must.True(t, tasksUpdated(j1, j9, name).modified)

	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	must.True(t, tasksUpdated(j1, j10, name).modified)

	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	must.True(t, tasksUpdated(j1, j11, name).modified)

	j11d1 := mock.Job()
	j11d1.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 1,
		},
	}
	j11d2 := mock.Job()
	j11d2.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 2,
		},
	}
	must.True(t, tasksUpdated(j11d1, j11d2, name).modified)

	j13 := mock.Job()
	j13.TaskGroups[0].Networks[0].DynamicPorts[0].Label = "foobar"
	must.True(t, tasksUpdated(j1, j13, name).modified)

	j14 := mock.Job()
	j14.TaskGroups[0].Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	must.True(t, tasksUpdated(j1, j14, name).modified)

	j15 := mock.Job()
	j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}}
	must.True(t, tasksUpdated(j1, j15, name).modified)

	j16 := mock.Job()
	j16.TaskGroups[0].EphemeralDisk.Sticky = true
	must.True(t, tasksUpdated(j1, j16, name).modified)

	// Change group meta
	j17 := mock.Job()
	j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll"
	must.True(t, tasksUpdated(j1, j17, name).modified)

	// Change job meta
	j18 := mock.Job()
	j18.Meta["j18_test"] = "roll_baby_roll"
	must.True(t, tasksUpdated(j1, j18, name).modified)

	// Change network mode
	j19 := mock.Job()
	j19.TaskGroups[0].Networks[0].Mode = "bridge"
	must.True(t, tasksUpdated(j1, j19, name).modified)

	// Change cores resource
	j20 := mock.Job()
	j20.TaskGroups[0].Tasks[0].Resources.CPU = 0
	j20.TaskGroups[0].Tasks[0].Resources.Cores = 2
	j21 := mock.Job()
	j21.TaskGroups[0].Tasks[0].Resources.CPU = 0
	j21.TaskGroups[0].Tasks[0].Resources.Cores = 4
	must.True(t, tasksUpdated(j20, j21, name).modified)

	// Compare identical Template wait configs
	j22 := mock.Job()
	j22.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		{
			Wait: &structs.WaitConfig{
				Min: pointer.Of(5 * time.Second),
				Max: pointer.Of(5 * time.Second),
			},
		},
	}
	j23 := mock.Job()
	j23.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		{
			Wait: &structs.WaitConfig{
				Min: pointer.Of(5 * time.Second),
				Max: pointer.Of(5 * time.Second),
			},
		},
	}
	must.False(t, tasksUpdated(j22, j23, name).modified)

	// Compare changed Template wait configs
	j23.TaskGroups[0].Tasks[0].Templates[0].Wait.Max = pointer.Of(10 * time.Second)
	must.True(t, tasksUpdated(j22, j23, name).modified)

	// Add a volume
	j24 := mock.Job()
	j25 := j24.Copy()
	j25.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
		"myvolume": {
			Name:   "myvolume",
			Type:   "csi",
			Source: "test-volume[0]",
		}}
	must.True(t, tasksUpdated(j24, j25, name).modified)

	// Alter a volume
	j26 := j25.Copy()
	j26.TaskGroups[0].Volumes["myvolume"].ReadOnly = true
	must.True(t, tasksUpdated(j25, j26, name).modified)

	// Alter a CSI plugin
	j27 := mock.Job()
	j27.TaskGroups[0].Tasks[0].CSIPluginConfig = &structs.TaskCSIPluginConfig{
		ID:   "myplugin",
		Type: "node",
	}
	j28 := j27.Copy()
	j28.TaskGroups[0].Tasks[0].CSIPluginConfig.Type = "monolith"
	must.True(t, tasksUpdated(j27, j28, name).modified)

	// Compare identical Template ErrMissingKey
	j29 := mock.Job()
	j29.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		{
			ErrMissingKey: false,
		},
	}
	j30 := mock.Job()
	j30.TaskGroups[0].Tasks[0].Templates = []*structs.Template{
		{
			ErrMissingKey: false,
		},
	}
	must.False(t, tasksUpdated(j29, j30, name).modified)

	// Compare changed Template ErrMissingKey
	j30.TaskGroups[0].Tasks[0].Templates[0].ErrMissingKey = true
	must.True(t, tasksUpdated(j29, j30, name).modified)
}

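// TestTasksUpdated_connectServiceUpdated asserts that connectServiceUpdated
// does not report sidecar tag changes as a modification (they are updatable
// in place) but does flag sidecar port and service port label changes.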
func TestTasksUpdated_connectServiceUpdated(t *testing.T) {
	ci.Parallel(t)

	servicesA := []*structs.Service{{
		Name:      "service1",
		PortLabel: "1111",
		Connect: &structs.ConsulConnect{
			SidecarService: &structs.ConsulSidecarService{
				Tags: []string{"a"},
			},
		},
	}}

	t.Run("service not updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"a"},
				},
			},
		}, {
			Name: "service2",
		}}
		updated := connectServiceUpdated(servicesA, servicesB).modified
		must.False(t, updated)
	})

	t.Run("service connect tags updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"b"}, // in-place update
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB).modified
		must.False(t, updated)
	})

	t.Run("service connect port updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1111",
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"a"},
					Port: "2222", // destructive update
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB).modified
		must.True(t, updated)
	})

	t.Run("service port label updated", func(t *testing.T) {
		servicesB := []*structs.Service{{
			Name: "service0",
		}, {
			Name:      "service1",
			PortLabel: "1112", // destructive update
			Connect: &structs.ConsulConnect{
				SidecarService: &structs.ConsulSidecarService{
					Tags: []string{"1"},
				},
			},
		}}
		updated := connectServiceUpdated(servicesA, servicesB).modified
		must.True(t, updated)
	})
}

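// TestNetworkUpdated asserts that networkUpdated detects changes to network
// mode, host networks, port mappings, and hostnames.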
func TestNetworkUpdated(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name    string
		a       []*structs.NetworkResource
		b       []*structs.NetworkResource
		updated bool
	}{
		{
			name: "mode updated",
			a: []*structs.NetworkResource{
				{Mode: "host"},
			},
			b: []*structs.NetworkResource{
				{Mode: "bridge"},
			},
			updated: true,
		},
		{
			name: "host_network updated",
			a: []*structs.NetworkResource{
				{DynamicPorts: []structs.Port{
					{Label: "http", To: 8080},
				}},
			},
			b: []*structs.NetworkResource{
				{DynamicPorts: []structs.Port{
					{Label: "http", To: 8080, HostNetwork: "public"},
				}},
			},
			updated: true,
		},
		{
			name: "port.To updated",
			a: []*structs.NetworkResource{
				{DynamicPorts: []structs.Port{
					{Label: "http", To: 8080},
				}},
			},
			b: []*structs.NetworkResource{
				{DynamicPorts: []structs.Port{
					{Label: "http", To: 8088},
				}},
			},
			updated: true,
		},
		{
			name: "hostname updated",
			a: []*structs.NetworkResource{
				{Hostname: "foo"},
			},
			b: []*structs.NetworkResource{
				{Hostname: "bar"},
			},
			updated: true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			must.Eq(t, tc.updated, networkUpdated(tc.a, tc.b).modified)
		})
	}
}

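// TestSetStatus verifies that setStatus submits an updated evaluation with
// the expected status, next and blocked evals, failed task group metrics,
// queued allocations, and deployment ID.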
func TestSetStatus(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)
	logger := testlog.HCLogger(t)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	require.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval := h.Evals[0]
	require.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc,
		"setStatus() submitted invalid eval: %v", newEval)

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	require.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, next.ID, newEval.NextEval, "setStatus() didn't set nextEval correctly: %v", newEval)

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	require.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, blocked.ID, newEval.BlockedEval, "setStatus() didn't set BlockedEval correctly: %v", newEval)

	// Test metrics
	h = NewHarness(t)
	metrics := map[string]*structs.AllocMetric{"foo": nil}
	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.True(t, reflect.DeepEqual(newEval.FailedTGAllocs, metrics),
		"setStatus() didn't set failed task group metrics correctly: %v", newEval)

	// Test queued allocations
	h = NewHarness(t)
	queuedAllocs := map[string]int{"web": 1}

	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.True(t, reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs), "setStatus() didn't set queued allocations correctly: %v", newEval)

	h = NewHarness(t)
	dID := uuid.Generate()
	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, dID, newEval.DeploymentID, "setStatus() didn't set deployment id correctly: %v", newEval)
}

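// TestInplaceUpdate_ChangedTaskGroup asserts that replacing a task group's
// tasks forces a destructive update rather than an in-place one.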
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	ci.Parallel(t)

	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{
		Name:      "FOO",
		Resources: &structs.Resources{},
	}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}

func TestInplaceUpdate_AllocatedResources(t *testing.T) {
	ci.Parallel(t)

	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Shared: structs.AllocatedSharedResources{
				Ports: structs.AllocatedPorts{
					{
						Label: "api-port",
						Value: 19910,
						To:    8080,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Update TG to add a new service (inplace)
	tg := job.TaskGroups[0]
	tg.Services = []*structs.Service{
		{
			Name: "tg-service",
		},
	}

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate incorrectly did not perform an inplace update")
	require.NotEmpty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did not do an inplace update")
	require.NotEmpty(t, ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports)

	port, ok := ctx.plan.NodeAllocation[node.ID][0].AllocatedResources.Shared.Ports.Get("api-port")
	require.True(t, ok)
	require.Equal(t, 19910, port.Value)
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	ci.Parallel(t)

	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}

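// TestInplaceUpdate_Success asserts that a compatible resource and service
// change is applied in place and that the updated allocation carries the new
// set of services.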
func TestInplaceUpdate_Success(t *testing.T) {
	ci.Parallel(t)

	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate did not do an inplace update")
	require.Equal(t, 1, len(ctx.plan.NodeAllocation), "inplaceUpdate did not do an inplace update")
	require.Equal(t, alloc.ID, inplace[0].Alloc.ID, "inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	require.NotNil(t, a.Job)
	require.Equal(t, 1, len(a.Job.TaskGroups))
	require.Equal(t, 1, len(a.Job.TaskGroups[0].Tasks))
	require.Equal(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services),
		"Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	require.Equal(t, 3, len(serviceNames))

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected consul service name missing: %v", name)
		}
	}
}

func TestInplaceUpdate_WildcardDatacenters(t *testing.T) {
	ci.Parallel(t)

	store, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()
	job.Datacenters = []string{"*"}

	node := mock.Node()
	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 900, node))

	// Register an alloc
	alloc := mock.AllocForNode(node)
	alloc.Job = job
	alloc.JobID = job.ID
	must.NoError(t, store.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc}))

	updates := []allocTuple{{Alloc: alloc, TaskGroup: job.TaskGroups[0]}}
	stack := NewGenericStack(false, ctx)
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	must.Len(t, 1, inplace,
		must.Sprintf("inplaceUpdate should have an inplace update"))
	must.Len(t, 0, unplaced)
	must.MapNotEmpty(t, ctx.plan.NodeAllocation,
		must.Sprintf("inplaceUpdate should have an inplace update"))
}

func TestInplaceUpdate_NodePools(t *testing.T) {
	ci.Parallel(t)

	store, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()
	job.Datacenters = []string{"*"}

	node1 := mock.Node()
	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1000, node1))

	node2 := mock.Node()
	node2.NodePool = "other"
	must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 1001, node2))

	// Register an alloc
	alloc1 := mock.AllocForNode(node1)
	alloc1.Job = job
	alloc1.JobID = job.ID
	must.NoError(t, store.UpsertJobSummary(1002, mock.JobSummary(alloc1.JobID)))

	alloc2 := mock.AllocForNode(node2)
	alloc2.Job = job
	alloc2.JobID = job.ID
	must.NoError(t, store.UpsertJobSummary(1003, mock.JobSummary(alloc2.JobID)))

	t.Logf("alloc1=%s alloc2=%s", alloc1.ID, alloc2.ID)

	must.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, 1004,
		[]*structs.Allocation{alloc1, alloc2}))

	updates := []allocTuple{
		{Alloc: alloc1, TaskGroup: job.TaskGroups[0]},
		{Alloc: alloc2, TaskGroup: job.TaskGroups[0]},
	}
	stack := NewGenericStack(false, ctx)
	destructive, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	must.Len(t, 1, inplace, must.Sprint("should have an inplace update"))
	must.Eq(t, alloc1.ID, inplace[0].Alloc.ID)
	must.Len(t, 1, ctx.plan.NodeAllocation[node1.ID],
		must.Sprint("NodeAllocation should have an inplace update for node1"))

	// note that NodeUpdate with the new alloc won't be populated here yet
	must.Len(t, 1, destructive, must.Sprint("should have a destructive update"))
	must.Eq(t, alloc2.ID, destructive[0].Alloc.ID)
}

func TestUtil_connectUpdated(t *testing.T) {
	ci.Parallel(t)

	t.Run("both nil", func(t *testing.T) {
		must.False(t, connectUpdated(nil, nil).modified)
	})

	t.Run("one nil", func(t *testing.T) {
		must.True(t, connectUpdated(nil, new(structs.ConsulConnect)).modified)
	})

	t.Run("native differ", func(t *testing.T) {
		a := &structs.ConsulConnect{Native: true}
		b := &structs.ConsulConnect{Native: false}
		must.True(t, connectUpdated(a, b).modified)
	})

	t.Run("gateway differ", func(t *testing.T) {
		a := &structs.ConsulConnect{Gateway: &structs.ConsulGateway{
			Ingress: new(structs.ConsulIngressConfigEntry),
		}}
		b := &structs.ConsulConnect{Gateway: &structs.ConsulGateway{
			Terminating: new(structs.ConsulTerminatingConfigEntry),
		}}
		must.True(t, connectUpdated(a, b).modified)
	})

	t.Run("sidecar task differ", func(t *testing.T) {
		a := &structs.ConsulConnect{SidecarTask: &structs.SidecarTask{
			Driver: "exec",
		}}
		b := &structs.ConsulConnect{SidecarTask: &structs.SidecarTask{
			Driver: "docker",
		}}
		must.True(t, connectUpdated(a, b).modified)
	})

	t.Run("sidecar service differ", func(t *testing.T) {
		a := &structs.ConsulConnect{SidecarService: &structs.ConsulSidecarService{
			Port: "1111",
		}}
		b := &structs.ConsulConnect{SidecarService: &structs.ConsulSidecarService{
			Port: "2222",
		}}
		must.True(t, connectUpdated(a, b).modified)
	})

	t.Run("same", func(t *testing.T) {
		a := new(structs.ConsulConnect)
		b := new(structs.ConsulConnect)
		must.False(t, connectUpdated(a, b).modified)
	})
}

func TestUtil_connectSidecarServiceUpdated(t *testing.T) {
	ci.Parallel(t)

	t.Run("both nil", func(t *testing.T) {
		require.False(t, connectSidecarServiceUpdated(nil, nil).modified)
	})

	t.Run("one nil", func(t *testing.T) {
		require.True(t, connectSidecarServiceUpdated(nil, new(structs.ConsulSidecarService)).modified)
	})

	t.Run("ports differ", func(t *testing.T) {
		a := &structs.ConsulSidecarService{Port: "1111"}
		b := &structs.ConsulSidecarService{Port: "2222"}
		require.True(t, connectSidecarServiceUpdated(a, b).modified)
	})

	t.Run("same", func(t *testing.T) {
		a := &structs.ConsulSidecarService{Port: "1111"}
		b := &structs.ConsulSidecarService{Port: "1111"}
		require.False(t, connectSidecarServiceUpdated(a, b).modified)
	})
}

func TestTasksUpdated_Identity(t *testing.T) {
	ci.Parallel(t)

	j1 := mock.Job()
	name := j1.TaskGroups[0].Name
	j1.TaskGroups[0].Tasks[0].Identity = nil

	j2 := j1.Copy()

	must.False(t, tasksUpdated(j1, j2, name).modified)

	// Set identity on j1 and assert update
	j1.TaskGroups[0].Tasks[0].Identity = &structs.WorkloadIdentity{}

	must.True(t, tasksUpdated(j1, j2, name).modified)
}

func TestTaskGroupConstraints(t *testing.T) {
	ci.Parallel(t)

	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:          "web",
		Count:         10,
		Constraints:   []*structs.Constraint{constr},
		EphemeralDisk: &structs.EphemeralDisk{},
		Tasks: []*structs.Task{
			{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": {}, "docker": {}}

	actConstraints := taskGroupConstraints(tg)
	require.True(t, reflect.DeepEqual(actConstraints.constraints, expConstr),
		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	require.True(t, reflect.DeepEqual(actConstraints.drivers, expDrivers),
		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
}

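// TestProgressMade asserts that progressMade treats node updates, new
// allocations, deployments, and deployment status updates as progress, and an
// empty or nil plan result as none.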
func TestProgressMade(t *testing.T) {
	ci.Parallel(t)

	noopPlan := &structs.PlanResult{}
	require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress")

	m := map[string][]*structs.Allocation{
		"foo": {mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	deployment := &structs.PlanResult{Deployment: mock.Deployment()}
	deploymentUpdates := &structs.PlanResult{
		DeploymentUpdates: []*structs.DeploymentStatusUpdate{
			{DeploymentID: uuid.Generate()},
		},
	}

	require.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) &&
		progressMade(deployment) && progressMade(deploymentUpdates))
}

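// TestDesiredUpdates verifies that desiredUpdates aggregates the diff results
// into per-task-group counts of placements, stops, migrations, and in-place
// versus destructive updates.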
func TestDesiredUpdates(t *testing.T) {
	ci.Parallel(t)

	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg2},
	}
	stop := []allocTuple{
		{TaskGroup: tg2, Alloc: a2},
		{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg2},
		{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	require.True(t, reflect.DeepEqual(desired, expected), "desiredUpdates() returned %#v; want %#v", desired, expected)
}

func TestUtil_AdjustQueuedAllocations(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.CreateIndex = 4
	alloc2.ModifyIndex = 4
	alloc3 := mock.Alloc()
	alloc3.CreateIndex = 3
	alloc3.ModifyIndex = 5
	alloc4 := mock.Alloc()
	alloc4.CreateIndex = 6
	alloc4.ModifyIndex = 8

	planResult := structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			"node-1": {alloc1},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			"node-1": {
				alloc2,
			},
			"node-2": {
				alloc3, alloc4,
			},
		},
		RefreshIndex: 3,
		AllocIndex:   16, // Should not be considered
	}

	queuedAllocs := map[string]int{"web": 2}
	adjustQueuedAllocations(logger, &planResult, queuedAllocs)

	require.Equal(t, 1, queuedAllocs["web"])
}

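// TestUtil_UpdateNonTerminalAllocsToLost verifies that allocations on a down
// node that are not client-terminal are marked lost in the plan, and that
// nothing is marked lost once the node is ready again.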
func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
	ci.Parallel(t)

	node := mock.Node()
	node.Status = structs.NodeStatusDown
	alloc1 := mock.Alloc()
	alloc1.NodeID = node.ID
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop

	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusStop
	alloc2.ClientStatus = structs.AllocClientStatusRunning

	alloc3 := mock.Alloc()
	alloc3.NodeID = node.ID
	alloc3.DesiredStatus = structs.AllocDesiredStatusStop
	alloc3.ClientStatus = structs.AllocClientStatusComplete

	alloc4 := mock.Alloc()
	alloc4.NodeID = node.ID
	alloc4.DesiredStatus = structs.AllocDesiredStatusStop
	alloc4.ClientStatus = structs.AllocClientStatusFailed

	allocs := []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}
	plan := structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	tainted := map[string]*structs.Node{node.ID: node}

	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost := make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected := []string{alloc1.ID, alloc2.ID}
	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)

	// Update the node status to ready and try again
	plan = structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	node.Status = structs.NodeStatusReady
	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost = make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected = []string{}
	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
}

func TestTaskGroupUpdated_Restart(t *testing.T) {
	ci.Parallel(t)

	j1 := mock.Job()
	name := j1.TaskGroups[0].Name
	j2 := j1.Copy()
	j3 := j1.Copy()

	must.False(t, tasksUpdated(j1, j2, name).modified)
	j2.TaskGroups[0].RestartPolicy.RenderTemplates = true
	must.True(t, tasksUpdated(j1, j2, name).modified)

	j3.TaskGroups[0].Tasks[0].RestartPolicy.RenderTemplates = true
	must.True(t, tasksUpdated(j1, j3, name).modified)
}