package scheduler

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/slices"
)
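
// The tests below follow a common pattern: seed the harness state store with
// nodes, a job, and a pending evaluation; run the service scheduler through
// h.Process(NewServiceScheduler, eval); then assert on what the harness
// recorded (h.Plans for submitted plans, h.Evals for updated evaluations, and
// h.CreateEvals for follow-up evaluations such as blocked evals).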

func TestServiceSched_JobRegister(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has no spawned blocked eval
	if len(h.CreateEvals) != 0 {
		t.Errorf("bad: %#v", h.CreateEvals)
		if h.Evals[0].BlockedEval != "" {
			t.Fatalf("bad: %#v", h.Evals[0])
		}
		t.FailNow()
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure allocations have unique names derived from Job.ID
	allocNames := helper.ConvertSlice(out,
		func(alloc *structs.Allocation) string { return alloc.Name })
	expectAllocNames := []string{}
	for i := 0; i < 10; i++ {
		expectAllocNames = append(expectAllocNames, fmt.Sprintf("%s.web[%d]", job.ID, i))
	}
	must.SliceContainsAll(t, expectAllocNames, allocNames)

	// Ensure different ports were used.
	used := make(map[int]map[string]struct{})
	for _, alloc := range out {
		for _, port := range alloc.AllocatedResources.Shared.Ports {
			nodeMap, ok := used[port.Value]
			if !ok {
				nodeMap = make(map[string]struct{})
				used[port.Value] = nodeMap
			}
			if _, ok := nodeMap[alloc.NodeID]; ok {
				t.Fatalf("Port collision on node %q %v", alloc.NodeID, port.Value)
			}
			nodeMap[alloc.NodeID] = struct{}{}
		}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_MemoryMaxHonored(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name                          string
		cpu                           int
		memory                        int
		memoryMax                     int
		memoryOversubscriptionEnabled bool

		expectedTaskMemoryMax int

		// expectedTotalMemoryMax should be SUM(MAX(memory, memoryMax)) for all tasks
		expectedTotalMemoryMax int
	}{
		{
			name:                          "plain no max",
			cpu:                           100,
			memory:                        200,
			memoryMax:                     0,
			memoryOversubscriptionEnabled: true,

			expectedTaskMemoryMax:  0,
			expectedTotalMemoryMax: 200,
		},
		{
			name:                          "with max",
			cpu:                           100,
			memory:                        200,
			memoryMax:                     300,
			memoryOversubscriptionEnabled: true,

			expectedTaskMemoryMax:  300,
			expectedTotalMemoryMax: 300,
		},
		{
			name:      "with max but disabled",
			cpu:       100,
			memory:    200,
			memoryMax: 300,

			memoryOversubscriptionEnabled: false,
			expectedTaskMemoryMax:         0,
			expectedTotalMemoryMax:        200, // same as no max
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			job := mock.Job()
			job.TaskGroups[0].Count = 1

			task := job.TaskGroups[0].Tasks[0].Name
			res := job.TaskGroups[0].Tasks[0].Resources
			res.CPU = c.cpu
			res.MemoryMB = c.memory
			res.MemoryMaxMB = c.memoryMax

			h := NewHarness(t)
			h.State.SchedulerSetConfig(h.NextIndex(), &structs.SchedulerConfiguration{
				MemoryOversubscriptionEnabled: c.memoryOversubscriptionEnabled,
			})

			// Create some nodes
			for i := 0; i < 10; i++ {
				node := mock.Node()
				require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
			}

			require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

			// Create a mock evaluation to register the job
			eval := &structs.Evaluation{
				Namespace:   structs.DefaultNamespace,
				ID:          uuid.Generate(),
				Priority:    job.Priority,
				TriggeredBy: structs.EvalTriggerJobRegister,
				JobID:       job.ID,
				Status:      structs.EvalStatusPending,
			}

			require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

			// Process the evaluation
			err := h.Process(NewServiceScheduler, eval)
			require.NoError(t, err)

			require.Len(t, h.Plans, 1)

			out, err := h.State.AllocsByJob(nil, job.Namespace, job.ID, false)
			require.NoError(t, err)

			// Ensure all allocations placed
			require.Len(t, out, 1)
			alloc := out[0]

			// check the new AllocatedResources fields
			require.Equal(t, int64(c.cpu), alloc.AllocatedResources.Tasks[task].Cpu.CpuShares)
			require.Equal(t, int64(c.memory), alloc.AllocatedResources.Tasks[task].Memory.MemoryMB)
			require.Equal(t, int64(c.expectedTaskMemoryMax), alloc.AllocatedResources.Tasks[task].Memory.MemoryMaxMB)

			// check the old, deprecated Resources fields
			require.Equal(t, c.cpu, alloc.TaskResources[task].CPU)
			require.Equal(t, c.memory, alloc.TaskResources[task].MemoryMB)
			require.Equal(t, c.expectedTaskMemoryMax, alloc.TaskResources[task].MemoryMaxMB)

			// check total resource fields - alloc.Resources is a deprecated field with no modern equivalent
			require.Equal(t, c.cpu, alloc.Resources.CPU)
			require.Equal(t, c.memory, alloc.Resources.MemoryMB)
			require.Equal(t, c.expectedTotalMemoryMax, alloc.Resources.MemoryMaxMB)
		})
	}
}

func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	job.TaskGroups[0].EphemeralDisk.Sticky = true
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the plan allocated
	plan := h.Plans[0]
	planned := make(map[string]*structs.Allocation)
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			planned[alloc.ID] = alloc
		}
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Update the job to force a rolling upgrade
	updated := job.Copy()
	updated.TaskGroups[0].Tasks[0].Resources.CPU += 10
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), updated))

	// Create a mock evaluation to handle the update
	eval = &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
	h1 := NewHarnessWithState(t, h.State)
	if err := h1.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have created only one new allocation
	// Ensure a single plan
	if len(h1.Plans) != 1 {
		t.Fatalf("bad: %#v", h1.Plans)
	}
	plan = h1.Plans[0]
	var newPlanned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		newPlanned = append(newPlanned, allocList...)
	}
	if len(newPlanned) != 10 {
		t.Fatalf("bad plan: %#v", plan)
	}
	// Ensure that the new allocations were placed on the same node as the older
	// ones
	for _, new := range newPlanned {
		if new.PreviousAllocation == "" {
			t.Fatalf("new alloc %q doesn't have a previous allocation", new.ID)
		}

		old, ok := planned[new.PreviousAllocation]
		if !ok {
			t.Fatalf("new alloc %q previous allocation doesn't match any prior placed alloc (%q)", new.ID, new.PreviousAllocation)
		}
		if new.NodeID != old.NodeID {
			t.Fatalf("new alloc and old alloc node doesn't match; got %q; want %q", new.NodeID, old.NodeID)
		}
	}
}

func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))

	// Create a job with count 2 and a disk requirement large enough that only
	// one allocation can fit on the node
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has a blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if h.CreateEvals[0].TriggeredBy != structs.EvalTriggerQueuedAllocs {
		t.Fatalf("bad: %#v", h.CreateEvals[0])
	}

	// Ensure the plan allocated only one allocation
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure only one allocation was placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job that uses distinct host and has count 1 higher than what is
	// possible.
	job := mock.Job()
	job.TaskGroups[0].Count = 11
	job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts})
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the eval has spawned blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	// Ensure the plan failed to alloc
	outEval := h.Evals[0]
	if len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %+v", outEval)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure a different node was used for each allocation.
	used := make(map[string]struct{})
	for _, alloc := range out {
		if _, ok := used[alloc.NodeID]; ok {
			t.Fatalf("Node collision %v", alloc.NodeID)
		}
		used[alloc.NodeID] = struct{}{}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		rack := "rack2"
		if i < 5 {
			rack = "rack1"
		}
		node.Meta["rack"] = rack
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job that uses distinct property and has count higher than what is
	// possible.
	job := mock.Job()
	job.TaskGroups[0].Count = 8
	job.Constraints = append(job.Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
			RTarget: "2",
		})
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has spawned blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	// Ensure the plan failed to alloc
	outEval := h.Evals[0]
	if len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %+v", outEval)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 4 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 4 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure no node was used more than twice
	used := make(map[string]uint64)
	for _, alloc := range out {
		if count, _ := used[alloc.NodeID]; count > 2 {
			t.Fatalf("Node %v used too much: %d", alloc.NodeID, count)
		}
		used[alloc.NodeID]++
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 2; i++ {
		node := mock.Node()
		node.Meta["ssd"] = "true"
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job that uses distinct property only on one task group.
	job := mock.Job()
	job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy())
	job.TaskGroups[0].Count = 1
	job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${meta.ssd}",
		})

	job.TaskGroups[1].Name = "tg2"
	job.TaskGroups[1].Count = 2
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval hasn't spawned blocked eval
	if len(h.CreateEvals) != 0 {
		t.Fatalf("bad: %#v", h.CreateEvals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 3 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 3 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)
	assert := assert.New(t)

	// Create a job that uses distinct property over the node-id
	job := mock.Job()
	job.TaskGroups[0].Count = 3
	job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${node.unique.id}",
		})
	assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob")

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 6; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode")
	}

	// Create some allocations
	var allocs []*structs.Allocation
	for i := 0; i < 3; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	assert.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs), "UpsertAllocs")

	// Update the count
	job2 := job.Copy()
	job2.TaskGroups[0].Count = 6
	assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2), "UpsertJob")

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

	// Ensure a single plan
	assert.Len(h.Plans, 1, "Number of plans")
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	assert.Nil(plan.Annotations, "Plan.Annotations")

	// Ensure the eval hasn't spawned blocked eval
	assert.Len(h.CreateEvals, 0, "Created Evals")

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	assert.Len(planned, 6, "Planned Allocations")

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	assert.Nil(err, "AllocsByJob")

	// Ensure all allocations placed
	assert.Len(out, 6, "Placed Allocations")

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// Test job registration with spread configured
func TestServiceSched_Spread(t *testing.T) {
	ci.Parallel(t)

	assert := assert.New(t)

	start := uint8(100)
	step := uint8(10)

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("%d%% in dc1", start)
		t.Run(name, func(t *testing.T) {
			h := NewHarness(t)
			remaining := uint8(100 - start)
			// Create a job that uses spread over data center
			job := mock.Job()
			job.Datacenters = []string{"dc*"}
			job.TaskGroups[0].Count = 10
			job.TaskGroups[0].Spreads = append(job.TaskGroups[0].Spreads,
				&structs.Spread{
					Attribute: "${node.datacenter}",
					Weight:    100,
					SpreadTarget: []*structs.SpreadTarget{
						{
							Value:   "dc1",
							Percent: start,
						},
						{
							Value:   "dc2",
							Percent: remaining,
						},
					},
				})
			assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob")
			// Create some nodes, half in dc2
			var nodes []*structs.Node
			nodeMap := make(map[string]*structs.Node)
			for i := 0; i < 10; i++ {
				node := mock.Node()
				if i%2 == 0 {
					node.Datacenter = "dc2"
				}

				// setting a narrow range makes it more likely for this test to
				// hit bugs in NetworkIndex
				node.NodeResources.MinDynamicPort = 20000
				node.NodeResources.MaxDynamicPort = 20005
				nodes = append(nodes, node)
				assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode")
				nodeMap[node.ID] = node
			}

			// Create a mock evaluation to register the job
			eval := &structs.Evaluation{
				Namespace:   structs.DefaultNamespace,
				ID:          uuid.Generate(),
				Priority:    job.Priority,
				TriggeredBy: structs.EvalTriggerJobRegister,
				JobID:       job.ID,
				Status:      structs.EvalStatusPending,
			}
			require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

			// Process the evaluation
			assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

			// Ensure a single plan
			assert.Len(h.Plans, 1, "Number of plans")
			plan := h.Plans[0]

			// Ensure the plan doesn't have annotations.
			assert.Nil(plan.Annotations, "Plan.Annotations")

			// Ensure the eval hasn't spawned blocked eval
			assert.Len(h.CreateEvals, 0, "Created Evals")

			// Ensure the plan allocated
			var planned []*structs.Allocation
			dcAllocsMap := make(map[string]int)
			for nodeId, allocList := range plan.NodeAllocation {
				planned = append(planned, allocList...)
				dc := nodeMap[nodeId].Datacenter
				c := dcAllocsMap[dc]
				c += len(allocList)
				dcAllocsMap[dc] = c
			}
			assert.Len(planned, 10, "Planned Allocations")

			expectedCounts := make(map[string]int)
			expectedCounts["dc1"] = 10 - i
			if i > 0 {
				expectedCounts["dc2"] = i
			}
			require.Equal(t, expectedCounts, dcAllocsMap)

			h.AssertEvalStatus(t, structs.EvalStatusComplete)
		})
		start = start - step
	}
}

// Test job registration with even spread across dc
func TestServiceSched_EvenSpread(t *testing.T) {
	ci.Parallel(t)

	assert := assert.New(t)

	h := NewHarness(t)
	// Create a job that uses even spread over data center
	job := mock.Job()
	job.Datacenters = []string{"dc1", "dc2"}
	job.TaskGroups[0].Count = 10
	job.TaskGroups[0].Spreads = append(job.TaskGroups[0].Spreads,
		&structs.Spread{
			Attribute: "${node.datacenter}",
			Weight:    100,
		})
	assert.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job), "UpsertJob")
	// Create some nodes, half in dc2
	var nodes []*structs.Node
	nodeMap := make(map[string]*structs.Node)
	for i := 0; i < 10; i++ {
		node := mock.Node()
		if i%2 == 0 {
			node.Datacenter = "dc2"
		}
		nodes = append(nodes, node)
		assert.Nil(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node), "UpsertNode")
		nodeMap[node.ID] = node
	}

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

	// Ensure a single plan
	assert.Len(h.Plans, 1, "Number of plans")
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	assert.Nil(plan.Annotations, "Plan.Annotations")

	// Ensure the eval hasn't spawned blocked eval
	assert.Len(h.CreateEvals, 0, "Created Evals")

	// Ensure the plan allocated
	var planned []*structs.Allocation
	dcAllocsMap := make(map[string]int)
	for nodeId, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
		dc := nodeMap[nodeId].Datacenter
		c := dcAllocsMap[dc]
		c += len(allocList)
		dcAllocsMap[dc] = c
	}
	assert.Len(planned, 10, "Planned Allocations")

	// Expect even split allocs across datacenter
	expectedCounts := make(map[string]int)
	expectedCounts["dc1"] = 5
	expectedCounts["dc2"] = 5

	require.Equal(t, expectedCounts, dcAllocsMap)

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_Annotate(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:    structs.DefaultNamespace,
		ID:           uuid.Generate(),
		Priority:     job.Priority,
		TriggeredBy:  structs.EvalTriggerJobRegister,
		JobID:        job.ID,
		AnnotatePlan: true,
		Status:       structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure the plan had annotations.
	if plan.Annotations == nil {
		t.Fatalf("expected annotations")
	}

	desiredTGs := plan.Annotations.DesiredTGUpdates
	if l := len(desiredTGs); l != 1 {
		t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1)
	}

	desiredChanges, ok := desiredTGs["web"]
	if !ok {
		t.Fatalf("expected task group web to have desired changes")
	}

	expected := &structs.DesiredUpdates{Place: 10}
	if !reflect.DeepEqual(desiredChanges, expected) {
		t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
	}
}

func TestServiceSched_JobRegister_CountZero(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
	}

	// Create a job and set the task group count to zero.
	job := mock.Job()
	job.TaskGroups[0].Count = 0
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure no allocations placed
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create NO nodes
	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure there is a follow up eval.
	if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]

	// Ensure the eval has its spawned blocked eval
	if outEval.BlockedEval != h.CreateEvals[0].ID {
		t.Fatalf("bad: %#v", outEval)
	}

	// Ensure the plan failed to alloc
	if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %#v", outEval)
	}

	metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name]
	if !ok {
		t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
	}

	// Check the coalesced failures
	if metrics.CoalescedFailures != 9 {
		t.Fatalf("bad: %#v", metrics)
	}

	_, ok = metrics.NodesAvailable["dc1"]
	must.False(t, ok, must.Sprintf(
		"expected NodesAvailable metric to be unpopulated when there are no nodes"))

	// Check queued allocations
	queued := outEval.QueuedAllocations["web"]
	if queued != 10 {
		t.Fatalf("expected queued: %v, actual: %v", 10, queued)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) {
	ci.Parallel(t)

	h := NewHarness(t)

	// Create a full node
	node := mock.Node()
	node.ReservedResources = &structs.NodeReservedResources{
		Cpu: structs.NodeReservedCpuResources{
			CpuShares: node.NodeResources.Cpu.CpuShares,
		},
	}
	node.ComputeClass()
	require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))

	// Create an ineligible node
	node2 := mock.Node()
	node2.Attributes["kernel.name"] = "windows"
	node2.ComputeClass()
	require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure the plan has created a follow up eval.
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	created := h.CreateEvals[0]
	if created.Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", created)
	}

	classes := created.ClassEligibility
	if len(classes) != 2 || !classes[node.ComputedClass] || classes[node2.ComputedClass] {
		t.Fatalf("bad: %#v", classes)
	}

	if created.EscapedComputedClass {
		t.Fatalf("bad: %#v", created)
	}

	// Ensure there is a follow up eval.
	if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]

	// Ensure the plan failed to alloc
	if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %#v", outEval)
	}

	metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name]
	if !ok {
		t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
	}

	// Check the coalesced failures
	if metrics.CoalescedFailures != 9 {
		t.Fatalf("bad: %#v", metrics)
	}

	// Check the available nodes
	if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 2 {
		t.Fatalf("bad: %#v", metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
|
|
|
|
|
2016-02-04 05:22:18 +00:00
|
|
|
func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-02-04 05:22:18 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create one node
|
|
|
|
node := mock.Node()
|
|
|
|
node.NodeClass = "class_0"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, node.ComputeClass())
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-02-04 05:22:18 +00:00
|
|
|
|
|
|
|
// Create a job that constrains on a node class
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].Constraints = append(job.Constraints,
|
|
|
|
&structs.Constraint{
|
2016-02-05 00:50:20 +00:00
|
|
|
LTarget: "${node.class}",
|
2016-02-04 05:22:18 +00:00
|
|
|
RTarget: "class_0",
|
|
|
|
Operand: "=",
|
|
|
|
},
|
|
|
|
)
|
|
|
|
tg2 := job.TaskGroups[0].Copy()
|
|
|
|
tg2.Name = "web2"
|
|
|
|
tg2.Constraints[1].RTarget = "class_1"
|
|
|
|
job.TaskGroups = append(job.TaskGroups, tg2)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-02-04 05:22:18 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-02-04 05:22:18 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-02-04 05:22:18 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-02-04 05:22:18 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 2 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
2016-05-19 01:11:40 +00:00
|
|
|
// Ensure two allocations placed
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-05-19 01:11:40 +00:00
|
|
|
if len(out) != 2 {
|
2016-02-04 05:22:18 +00:00
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
2016-05-19 01:11:40 +00:00
|
|
|
if len(h.Evals) != 1 {
|
|
|
|
t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
|
|
|
|
}
|
|
|
|
outEval := h.Evals[0]
|
|
|
|
|
2016-05-19 20:09:52 +00:00
|
|
|
// Ensure the eval has its spawned blocked eval
|
2016-05-25 01:12:59 +00:00
|
|
|
if outEval.BlockedEval != h.CreateEvals[0].ID {
|
2016-05-19 20:09:52 +00:00
|
|
|
t.Fatalf("bad: %#v", outEval)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan failed to alloc one tg
|
2016-05-19 01:11:40 +00:00
|
|
|
if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", outEval)
|
|
|
|
}
|
|
|
|
|
|
|
|
metrics, ok := outEval.FailedTGAllocs[tg2.Name]
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check the coalesced failures
|
|
|
|
if metrics.CoalescedFailures != tg2.Count-1 {
|
|
|
|
t.Fatalf("bad: %#v", metrics)
|
|
|
|
}
|
|
|
|
|
2016-02-04 05:22:18 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
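
// nodeClassConstraint is a minimal illustrative sketch, not part of the
// original tests: it names the constraint shape the test above builds inline,
// an equality match against the interpolated ${node.class} attribute. A task
// group carrying a class no node can satisfy ends up in FailedTGAllocs, which
// is what the blocked-eval assertions above check.
func nodeClassConstraint(class string) *structs.Constraint {
	return &structs.Constraint{
		LTarget: "${node.class}",
		RTarget: class,
		Operand: "=",
	}
}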
|
|
|
|
|
2016-06-21 00:56:49 +00:00
|
|
|
// This test just ensures the scheduler handles the eval type to avoid
|
|
|
|
// regressions.
|
|
|
|
func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-05-20 23:03:53 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a job and set the task group count to zero.
|
|
|
|
job := mock.Job()
|
2016-06-21 00:56:49 +00:00
|
|
|
job.TaskGroups[0].Count = 0
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-06-21 00:56:49 +00:00
|
|
|
|
|
|
|
// Create a mock blocked evaluation
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-06-21 00:56:49 +00:00
|
|
|
Status: structs.EvalStatusBlocked,
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerMaxPlans,
|
|
|
|
JobID: job.ID,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert it into the state store
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-06-21 00:56:49 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure there was no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
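
// blockedMaxPlanEval is an assumed helper shown only for illustration; it
// mirrors the fixture above: a blocked evaluation whose trigger is the
// max-plan limit. The scheduler must accept this trigger type and complete
// without emitting a plan, as the test asserts.
func blockedMaxPlanEval(job *structs.Job) *structs.Evaluation {
	return &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerMaxPlans,
		JobID:       job.ID,
	}
}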
|
|
|
|
|
2016-07-22 23:28:21 +00:00
|
|
|
func TestServiceSched_Plan_Partial_Progress(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-07-22 23:28:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-07-22 23:28:21 +00:00
|
|
|
|
2016-07-25 21:56:38 +00:00
|
|
|
// Create a job with a high resource ask so that all the allocations can't
|
|
|
|
// be placed on a single node.
|
2016-07-22 23:28:21 +00:00
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 3
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.CPU = 3600
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-07-22 23:28:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-22 23:28:21 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-22 23:28:21 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-07-22 23:28:21 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
if plan.Annotations != nil {
|
|
|
|
t.Fatalf("expected no annotations")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-07-22 23:28:21 +00:00
|
|
|
|
|
|
|
// Ensure only one allocation was placed
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 2 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 2, queued)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
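
// plannedAllocs is an illustrative helper (an assumption, not used by the
// tests above) that flattens plan.NodeAllocation the same way each test does
// inline, so counts of placed allocations can be read off a single slice.
func plannedAllocs(plan *structs.Plan) []*structs.Allocation {
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	return planned
}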
|
|
|
|
|
2016-06-21 00:56:49 +00:00
|
|
|
func TestServiceSched_EvaluateBlockedEval(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-06-21 00:56:49 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-05-20 23:03:53 +00:00
|
|
|
|
|
|
|
// Create a mock blocked evaluation
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-05-20 23:03:53 +00:00
|
|
|
Status: structs.EvalStatusBlocked,
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert it into the state store
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-05-20 23:03:53 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure there was no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that the eval was reblocked
|
|
|
|
if len(h.ReblockEvals) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.ReblockEvals)
|
|
|
|
}
|
|
|
|
if h.ReblockEvals[0].ID != eval.ID {
|
|
|
|
t.Fatalf("expect same eval to be reblocked; got %q; want %q", h.ReblockEvals[0].ID, eval.ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval status was not updated
|
|
|
|
if len(h.Evals) != 0 {
|
|
|
|
t.Fatalf("Existing eval should not have status set")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-05-20 23:03:53 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-05-20 23:03:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-05-20 23:03:53 +00:00
|
|
|
|
|
|
|
// Create a mock blocked evaluation
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-05-20 23:03:53 +00:00
|
|
|
Status: structs.EvalStatusBlocked,
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert it into the state store
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-05-20 23:03:53 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
if plan.Annotations != nil {
|
|
|
|
t.Fatalf("expected no annotations")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval has no spawned blocked eval
|
|
|
|
if len(h.Evals) != 1 {
|
2022-08-08 09:26:08 +00:00
|
|
|
t.Errorf("bad: %#v", h.Evals)
|
2016-05-25 17:28:25 +00:00
|
|
|
if h.Evals[0].BlockedEval != "" {
|
2016-05-20 23:03:53 +00:00
|
|
|
t.Fatalf("bad: %#v", h.Evals[0])
|
|
|
|
}
|
2022-08-08 09:26:08 +00:00
|
|
|
t.FailNow()
|
2016-05-20 23:03:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-05-20 23:03:53 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval was not reblocked
|
|
|
|
if len(h.ReblockEvals) != 0 {
|
|
|
|
t.Fatalf("Existing eval should not have been reblocked as it placed all allocations")
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2016-07-18 22:04:05 +00:00
|
|
|
|
|
|
|
// Ensure the count of queued allocations is zero
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 0 {
|
|
|
|
t.Fatalf("expected queued: %v, actual: %v", 0, queued)
|
|
|
|
}
|
2016-05-20 23:03:53 +00:00
|
|
|
}
|
|
|
|
|
2015-08-14 01:51:08 +00:00
|
|
|
func TestServiceSched_JobModify(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-08-14 05:14:37 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2015-09-07 19:27:12 +00:00
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2015-08-14 05:14:37 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
2015-09-18 04:25:55 +00:00
|
|
|
// Add a few terminal status allocations; these should be ignored
|
|
|
|
var terminal []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2016-07-13 19:20:46 +00:00
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
2022-09-13 19:52:47 +00:00
|
|
|
alloc.ClientStatus = structs.AllocClientStatusFailed // #10446
|
2015-09-18 04:25:55 +00:00
|
|
|
terminal = append(terminal, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal))
|
2015-09-18 04:25:55 +00:00
|
|
|
|
2015-08-14 05:14:37 +00:00
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-14 05:14:37 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2015-08-26 00:06:06 +00:00
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2015-08-26 00:06:06 +00:00
|
|
|
if len(update) != len(allocs) {
|
2015-08-14 05:14:37 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2015-08-14 05:14:37 +00:00
|
|
|
if len(out) != 10 {
|
2015-08-26 00:06:06 +00:00
|
|
|
t.Fatalf("bad: %#v", out)
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2015-08-15 21:47:13 +00:00
|
|
|
|
2016-03-17 18:02:59 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
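
// runningAllocsForJob is a sketch of the fixture pattern the modify tests
// repeat: one running allocation per node for the given job. The helper and
// its name are assumptions for illustration only; the tests above build the
// equivalent slice inline.
func runningAllocsForJob(job *structs.Job, nodes []*structs.Node) []*structs.Allocation {
	var allocs []*structs.Allocation
	for i, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = fmt.Sprintf("%s.%s[%d]", job.Name, job.TaskGroups[0].Name, i)
		allocs = append(allocs, alloc)
	}
	return allocs
}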
|
|
|
|
|
2021-07-07 15:14:20 +00:00
|
|
|
func TestServiceSched_JobModify_Datacenters(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2021-07-07 15:14:20 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// Create some nodes in 3 DCs
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 1; i < 4; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
node.Datacenter = fmt.Sprintf("dc%d", i)
|
|
|
|
nodes = append(nodes, node)
|
|
|
|
h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 3
|
|
|
|
job.Datacenters = []string{"dc1", "dc2", "dc3"}
|
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 3; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
// Update the job to 2 DCs
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.TaskGroups[0].Count = 4
|
|
|
|
job2.Datacenters = []string{"dc1", "dc2"}
|
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(err)
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
require.Len(plan.NodeUpdate, 1) // alloc in DC3 gets destructive update
|
|
|
|
require.Len(plan.NodeUpdate[nodes[2].ID], 1)
|
|
|
|
require.Equal(allocs[2].ID, plan.NodeUpdate[nodes[2].ID][0].ID)
|
|
|
|
|
|
|
|
require.Len(plan.NodeAllocation, 2) // only 2 eligible nodes
|
|
|
|
placed := map[string]*structs.Allocation{}
|
|
|
|
for node, placedAllocs := range plan.NodeAllocation {
|
|
|
|
require.True(
|
2022-09-21 19:53:25 +00:00
|
|
|
slices.Contains([]string{nodes[0].ID, nodes[1].ID}, node),
|
2021-07-07 15:14:20 +00:00
|
|
|
"allocation placed on ineligible node",
|
|
|
|
)
|
|
|
|
for _, alloc := range placedAllocs {
|
|
|
|
placed[alloc.ID] = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.Len(placed, 4)
|
|
|
|
require.Equal(nodes[0].ID, placed[allocs[0].ID].NodeID, "alloc should not have moved")
|
|
|
|
require.Equal(nodes[1].ID, placed[allocs[1].ID].NodeID, "alloc should not have moved")
|
|
|
|
}
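
// placementsWithin is an assumed helper, shown only to restate the
// datacenter-shrink expectation above in code: after the job drops dc3, every
// node that receives placements must be in the still-eligible set.
func placementsWithin(plan *structs.Plan, eligible map[string]bool) bool {
	for nodeID := range plan.NodeAllocation {
		if !eligible[nodeID] {
			return false
		}
	}
	return true
}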
|
|
|
|
|
2016-03-21 21:17:37 +00:00
|
|
|
// Have a single node and submit a job. Increment the count such that all fit
|
|
|
|
// on the node but the node doesn't have enough resources to fit the new count +
|
|
|
|
// 1. This tests that we properly discount the resources of existing allocs.
|
|
|
|
func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-03-21 21:17:37 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create one node
|
|
|
|
node := mock.Node()
|
2018-10-04 21:33:09 +00:00
|
|
|
node.NodeResources.Cpu.CpuShares = 1000
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Generate a fake job with one allocation
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.CPU = 256
|
|
|
|
job2 := job.Copy()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2018-10-03 16:47:18 +00:00
|
|
|
alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256
|
2016-03-21 21:17:37 +00:00
|
|
|
allocs = append(allocs, alloc)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Update the job to count 3
|
|
|
|
job2.TaskGroups[0].Count = 3
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-03-21 21:17:37 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-03-21 21:17:37 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan didn't evict the alloc
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2016-03-22 00:23:04 +00:00
|
|
|
if len(planned) != 3 {
|
2016-03-21 21:17:37 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
2016-05-19 01:11:40 +00:00
|
|
|
// Ensure the plan had no failures
|
|
|
|
if len(h.Evals) != 1 {
|
|
|
|
t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
|
|
|
|
}
|
|
|
|
outEval := h.Evals[0]
|
|
|
|
if outEval == nil || len(outEval.FailedTGAllocs) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", outEval)
|
2016-03-21 21:17:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2016-03-22 00:23:04 +00:00
|
|
|
if len(out) != 3 {
|
2016-03-21 21:17:37 +00:00
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
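
// nodeLimitArithmetic spells out the capacity math behind the test above as a
// sketch: the node exposes 1000 CPU shares and the existing allocation holds
// 256 of them, so a count of 3 (768 shares total) only fits if the scheduler
// discounts the shares already held by the kept allocation instead of
// charging them twice. The constants mirror the fixture values.
func nodeLimitArithmetic() bool {
	const nodeCPU = 1000
	const perAllocCPU = 256
	const newCount = 3
	return newCount*perAllocCPU <= nodeCPU
}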
|
|
|
|
|
2016-03-17 18:02:59 +00:00
|
|
|
func TestServiceSched_JobModify_CountZero(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-03-17 18:02:59 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-03-17 18:02:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2017-05-31 18:34:46 +00:00
|
|
|
alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i))
|
2016-03-17 18:02:59 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Add a few terminal status allocations; these should be ignored
|
|
|
|
var terminal []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2017-05-31 18:34:46 +00:00
|
|
|
alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i))
|
2016-07-13 19:20:46 +00:00
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
2016-03-17 18:02:59 +00:00
|
|
|
terminal = append(terminal, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), terminal))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Update the job to be count zero
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
|
|
|
job2.TaskGroups[0].Count = 0
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-03-17 18:02:59 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-03-17 18:02:59 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != len(allocs) {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't allocate
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Ensure no non-terminal allocations remain
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2016-03-17 18:02:59 +00:00
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
2015-08-15 21:47:13 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-08-14 01:51:08 +00:00
|
|
|
}
|
|
|
|
|
2015-09-07 22:17:39 +00:00
|
|
|
func TestServiceSched_JobModify_Rolling(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-09-07 22:17:39 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-06-06 21:08:46 +00:00
|
|
|
desiredUpdates := 4
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-09-07 22:17:39 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted only MaxParallel
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
2017-06-06 21:08:46 +00:00
|
|
|
if len(update) != desiredUpdates {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), desiredUpdates, plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2017-06-06 21:08:46 +00:00
|
|
|
if len(planned) != desiredUpdates {
|
2015-09-07 22:17:39 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
2017-07-06 00:13:45 +00:00
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
2017-06-06 21:08:46 +00:00
|
|
|
// Ensure a deployment was created
|
2017-07-04 20:31:01 +00:00
|
|
|
if plan.Deployment == nil {
|
2017-06-06 21:08:46 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2020-07-17 18:07:43 +00:00
|
|
|
dstate, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
2017-06-06 21:08:46 +00:00
|
|
|
if !ok {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2020-07-17 18:07:43 +00:00
|
|
|
if dstate.DesiredTotal != 10 || dstate.DesiredCanaries != 0 {
|
|
|
|
t.Fatalf("bad: %#v", dstate)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
}
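
// rollingUpdateStrategy is an illustrative constructor (an assumption, not
// part of the original tests) for the update block used above. MaxParallel
// bounds how many existing allocations one scheduling pass may destructively
// replace, which is exactly what the eviction-count assertion checks.
func rollingUpdateStrategy(maxParallel int) *structs.UpdateStrategy {
	return &structs.UpdateStrategy{
		MaxParallel:     maxParallel,
		HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
		MinHealthyTime:  10 * time.Second,
		HealthyDeadline: 10 * time.Minute,
	}
}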
|
|
|
|
|
2017-07-18 00:18:12 +00:00
|
|
|
// This tests that the old allocation is stopped before placing.
|
2017-07-20 19:23:40 +00:00
|
|
|
// It is critical to test that the updated job attempts to place more
|
|
|
|
// allocations as this allows us to assert that destructive changes are done
|
|
|
|
// first.
|
2017-07-18 00:18:12 +00:00
|
|
|
func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-07-18 00:18:12 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
2018-10-03 16:47:18 +00:00
|
|
|
// Create a node and clear the reserved resources
|
2017-07-18 00:18:12 +00:00
|
|
|
node := mock.Node()
|
2018-10-03 16:47:18 +00:00
|
|
|
node.ReservedResources = nil
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
2018-10-03 16:47:18 +00:00
|
|
|
// Create a resource ask that is the same as the resources available on the
|
|
|
|
// node
|
2018-10-04 21:33:09 +00:00
|
|
|
cpu := node.NodeResources.Cpu.CpuShares
|
2018-10-03 16:47:18 +00:00
|
|
|
mem := node.NodeResources.Memory.MemoryMB
|
|
|
|
|
|
|
|
request := &structs.Resources{
|
|
|
|
CPU: int(cpu),
|
|
|
|
MemoryMB: int(mem),
|
|
|
|
}
|
|
|
|
allocated := &structs.AllocatedResources{
|
|
|
|
Tasks: map[string]*structs.AllocatedTaskResources{
|
|
|
|
"web": {
|
|
|
|
Cpu: structs.AllocatedCpuResources{
|
|
|
|
CpuShares: cpu,
|
|
|
|
},
|
|
|
|
Memory: structs.AllocatedMemoryResources{
|
|
|
|
MemoryMB: mem,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Generate a fake job with one alloc that consumes the whole node
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
2018-10-03 16:47:18 +00:00
|
|
|
job.TaskGroups[0].Tasks[0].Resources = request
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
alloc := mock.Alloc()
|
2018-10-03 16:47:18 +00:00
|
|
|
alloc.AllocatedResources = allocated
|
2017-07-18 00:18:12 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
2017-07-20 19:23:40 +00:00
|
|
|
// Update the job to place more versions of the task group, raise the count,
|
|
|
|
// and force destructive updates
|
2017-07-18 00:18:12 +00:00
|
|
|
job2 := job.Copy()
|
2017-07-20 19:23:40 +00:00
|
|
|
job2.TaskGroups[0].Count = 5
|
2017-07-18 00:18:12 +00:00
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
2017-08-21 21:07:54 +00:00
|
|
|
MaxParallel: 5,
|
2017-07-18 00:18:12 +00:00
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
2018-10-03 16:47:18 +00:00
|
|
|
job2.TaskGroups[0].Tasks[0].Resources = mock.Job().TaskGroups[0].Tasks[0].Resources
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-07-18 00:18:12 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-07-18 00:18:12 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted only MaxParallel
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 1 {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), 1, plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2018-10-03 16:47:18 +00:00
|
|
|
if len(planned) != 5 {
|
2017-07-18 00:18:12 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a deployment was created
|
|
|
|
if plan.Deployment == nil {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2020-07-17 18:07:43 +00:00
|
|
|
dstate, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
2017-07-18 00:18:12 +00:00
|
|
|
if !ok {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2020-07-17 18:07:43 +00:00
|
|
|
if dstate.DesiredTotal != 5 || dstate.DesiredCanaries != 0 {
|
|
|
|
t.Fatalf("bad: %#v", dstate)
|
2017-07-18 00:18:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-06 15:58:15 +00:00
|
|
|
func TestServiceSched_JobModify_Canaries(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-09-07 19:27:12 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-09-07 19:27:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-07-06 15:58:15 +00:00
|
|
|
desiredUpdates := 2
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
Canary: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-07-06 15:58:15 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted nothing
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), 0, plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != desiredUpdates {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2018-04-19 20:58:06 +00:00
|
|
|
for _, canary := range planned {
|
|
|
|
if canary.DeploymentStatus == nil || !canary.DeploymentStatus.Canary {
|
|
|
|
t.Fatalf("expected canary field to be set on canary alloc %q", canary.ID)
|
|
|
|
}
|
|
|
|
}
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a deployment was created
|
|
|
|
if plan.Deployment == nil {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2020-01-22 20:34:03 +00:00
|
|
|
|
|
|
|
// Ensure local state was not altered in scheduler
|
2020-07-17 18:07:43 +00:00
|
|
|
staleDState, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
2020-01-22 20:34:03 +00:00
|
|
|
require.True(t, ok)
|
|
|
|
|
2020-07-17 18:07:43 +00:00
|
|
|
require.Equal(t, 0, len(staleDState.PlacedCanaries))
|
2020-01-22 20:34:03 +00:00
|
|
|
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
|
|
|
|
// Grab the latest state
|
|
|
|
deploy, err := h.State.DeploymentByID(ws, plan.Deployment.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
state, ok := deploy.TaskGroups[job.TaskGroups[0].Name]
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
require.Equal(t, 10, state.DesiredTotal)
|
|
|
|
require.Equal(t, state.DesiredCanaries, desiredUpdates)
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Assert the canaries were added to the placed list
|
|
|
|
if len(state.PlacedCanaries) != desiredUpdates {
|
2020-01-22 20:34:03 +00:00
|
|
|
assert.Fail(t, "expected PlacedCanaries to equal desiredUpdates", state)
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
|
|
|
}
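
// countCanaries is an assumed helper that restates the canary check above: an
// allocation only counts as a canary when its DeploymentStatus is present and
// flagged. With Canary > 0 in the update strategy the scheduler places only
// these copies and evicts nothing, as the test asserts.
func countCanaries(allocs []*structs.Allocation) int {
	n := 0
	for _, alloc := range allocs {
		if alloc.DeploymentStatus != nil && alloc.DeploymentStatus.Canary {
			n++
		}
	}
	return n
}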
|
|
|
|
|
|
|
|
func TestServiceSched_JobModify_InPlace(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-07-06 15:58:15 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and create an older deployment
|
|
|
|
job := mock.Job()
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
2020-04-21 12:56:05 +00:00
|
|
|
taskName := job.TaskGroups[0].Tasks[0].Name
|
|
|
|
|
|
|
|
adr := structs.AllocatedDeviceResource{
|
|
|
|
Type: "gpu",
|
|
|
|
Vendor: "nvidia",
|
|
|
|
Name: "1080ti",
|
|
|
|
DeviceIDs: []string{uuid.Generate()},
|
|
|
|
}
|
|
|
|
|
2021-01-15 17:45:12 +00:00
|
|
|
asr := structs.AllocatedSharedResources{
|
|
|
|
Ports: structs.AllocatedPorts{{Label: "http"}},
|
|
|
|
Networks: structs.Networks{{Mode: "bridge"}},
|
|
|
|
}
|
|
|
|
|
2017-07-06 15:58:15 +00:00
|
|
|
// Create allocs that are part of the old deployment
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
2022-01-15 01:09:14 +00:00
|
|
|
alloc := mock.AllocForNode(nodes[i])
|
2017-07-06 15:58:15 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DeploymentID = d.ID
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)}
|
2020-04-21 12:56:05 +00:00
|
|
|
alloc.AllocatedResources.Tasks[taskName].Devices = []*structs.AllocatedDeviceResource{&adr}
|
2021-01-15 17:45:12 +00:00
|
|
|
alloc.AllocatedResources.Shared = asr
|
2017-07-06 15:58:15 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
|
|
|
desiredUpdates := 4
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-09-07 19:27:12 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-09-07 19:27:12 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan did not evict any allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan updated the existing allocs
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
for _, p := range planned {
|
|
|
|
if p.Job != job2 {
|
|
|
|
t.Fatalf("should update job")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-09-14 00:06:34 +00:00
|
|
|
|
2020-04-21 12:56:05 +00:00
|
|
|
// Verify the allocated networks and devices did not change
|
2017-12-08 22:50:06 +00:00
|
|
|
rp := structs.Port{Label: "admin", Value: 5000}
|
2015-09-14 00:06:34 +00:00
|
|
|
for _, alloc := range out {
|
2021-01-15 17:45:12 +00:00
|
|
|
// Verify shared allocated resources persisted
|
|
|
|
require.Equal(t, alloc.AllocatedResources.Shared.Ports, asr.Ports)
|
|
|
|
require.Equal(t, alloc.AllocatedResources.Shared.Networks, asr.Networks)
|
|
|
|
|
2020-04-21 12:56:05 +00:00
|
|
|
for _, resources := range alloc.AllocatedResources.Tasks {
|
2015-11-15 06:28:11 +00:00
|
|
|
if resources.Networks[0].ReservedPorts[0] != rp {
|
2015-09-14 00:06:34 +00:00
|
|
|
t.Fatalf("bad: %#v", alloc)
|
|
|
|
}
|
2020-04-21 12:56:05 +00:00
|
|
|
if len(resources.Devices) == 0 || !reflect.DeepEqual(resources.Devices[0], &adr) {
|
|
|
|
t.Fatalf("bad devices has changed: %#v", alloc)
|
|
|
|
}
|
2015-09-14 00:06:34 +00:00
|
|
|
}
|
|
|
|
}
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Verify the deployment id was changed and health cleared
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.DeploymentID == d.ID {
|
|
|
|
t.Fatalf("bad: deployment id not cleared")
|
|
|
|
} else if alloc.DeploymentStatus != nil {
|
|
|
|
t.Fatalf("bad: deployment status not cleared")
|
|
|
|
}
|
|
|
|
}
|
2015-09-07 19:27:12 +00:00
|
|
|
}
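
// healthyDeploymentAlloc sketches, as an assumption, the fixture used above:
// an allocation attached to an older deployment and marked healthy. An
// in-place update must move it to the new deployment with its health cleared
// while leaving the allocated ports, networks, and devices untouched.
func healthyDeploymentAlloc(job *structs.Job, node *structs.Node, deploymentID string) *structs.Allocation {
	alloc := mock.AllocForNode(node)
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.DeploymentID = deploymentID
	alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: pointer.Of(true)}
	return alloc
}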
|
|
|
|
|
2019-10-23 22:23:16 +00:00
|
|
|
// TestServiceSched_JobModify_InPlace08 asserts that inplace updates of
|
|
|
|
// allocations created with Nomad 0.8 do not cause panics.
|
|
|
|
//
|
|
|
|
// COMPAT(0.11) - While we do not guarantee that upgrades from 0.8 -> 0.10
|
|
|
|
// (skipping 0.9) are safe, we do want to avoid panics in the scheduler which
|
|
|
|
// cause unrecoverable server outages with no chance of recovery.
|
|
|
|
//
|
|
|
|
// Safe to remove in 0.11.0 as no one should ever be trying to upgrade from 0.8
|
|
|
|
// to 0.11!
|
|
|
|
func TestServiceSched_JobModify_InPlace08(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2019-10-23 22:23:16 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Generate a fake job with 0.8 allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Create 0.8 alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job.Copy()
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.AllocatedResources = nil // 0.8 didn't have this
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Update the job inplace
|
|
|
|
job2 := job.Copy()
|
|
|
|
|
|
|
|
job2.TaskGroups[0].Tasks[0].Services[0].Tags[0] = "newtag"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan did not evict any allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
require.Zero(t, update)
|
|
|
|
|
|
|
|
// Ensure the plan updated the existing alloc
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Len(t, planned, 1)
|
|
|
|
for _, p := range planned {
|
|
|
|
require.Equal(t, job2, p.Job)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
require.Len(t, out, 1)
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
newAlloc := out[0]
|
|
|
|
|
|
|
|
// Verify AllocatedResources was set
|
|
|
|
require.NotNil(t, newAlloc.AllocatedResources)
|
|
|
|
}
|
|
|
|
|
2017-03-08 19:47:55 +00:00
|
|
|
func TestServiceSched_JobModify_DistinctProperty(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-03-08 19:47:55 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
node.Meta["rack"] = fmt.Sprintf("rack%d", i)
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-03-08 19:47:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job that uses distinct property and has count higher than what is
|
|
|
|
// possible.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 11
|
|
|
|
job.Constraints = append(job.Constraints,
|
|
|
|
&structs.Constraint{
|
|
|
|
Operand: structs.ConstraintDistinctProperty,
|
|
|
|
LTarget: "${meta.rack}",
|
|
|
|
})
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
oldJob := job.Copy()
|
|
|
|
oldJob.JobModifyIndex -= 1
|
|
|
|
oldJob.TaskGroups[0].Count = 4
|
|
|
|
|
|
|
|
// Place 4 of 10
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 4; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = oldJob
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-03-08 19:47:55 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-03-08 19:47:55 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
if plan.Annotations != nil {
|
|
|
|
t.Fatalf("expected no annotations")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval has spawned a blocked eval
|
|
|
|
if len(h.CreateEvals) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.CreateEvals)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval recorded one failed task group placement
|
|
|
|
outEval := h.Evals[0]
|
|
|
|
if len(outEval.FailedTGAllocs) != 1 {
|
|
|
|
t.Fatalf("bad: %+v", outEval)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", planned)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a different node was used for each allocation.
|
|
|
|
used := make(map[string]struct{})
|
|
|
|
for _, alloc := range out {
|
|
|
|
if _, ok := used[alloc.NodeID]; ok {
|
|
|
|
t.Fatalf("Node collision %v", alloc.NodeID)
|
|
|
|
}
|
|
|
|
used[alloc.NodeID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2019-12-03 14:14:49 +00:00
|
|
|
// TestServiceSched_JobModify_NodeReschedulePenalty ensures that
|
|
|
|
// a failing allocation gets rescheduled with a penalty to the old
|
|
|
|
// node, but an updated job doesn't apply the penalty.
|
|
|
|
func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2019-12-03 14:14:49 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2019-12-03 14:14:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
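// The reschedule policy below allows a single attempt per 15 minute window
// with a constant 5s delay, so only one replacement for the failed alloc is
// expected.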
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
MaxDelay: 1 * time.Minute,
|
|
|
|
DelayFunction: "constant",
|
|
|
|
}
|
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2019-12-03 14:14:49 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
|
|
|
failedAlloc := allocs[1]
|
|
|
|
failedAllocID := failedAlloc.ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2019-12-03 14:14:49 +00:00
|
|
|
|
|
|
|
// Create and process a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2019-12-03 14:14:49 +00:00
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
// Ensure we have one plan
|
|
|
|
require.Equal(1, len(h.Plans))
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Verify that one new allocation got created with its reschedule tracker info
|
|
|
|
require.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
|
|
|
require.Equal(1, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
require.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID)
|
|
|
|
|
|
|
|
// Verify that the node-reschedule penalty was applied to the new alloc
|
|
|
|
for _, scoreMeta := range newAlloc.Metrics.ScoreMetaData {
|
|
|
|
if scoreMeta.NodeID == failedAlloc.NodeID {
|
|
|
|
require.Equal(-1.0, scoreMeta.Scores["node-reschedule-penalty"],
|
|
|
|
"eval to replace failed alloc missing node-reshedule-penalty: %v",
|
|
|
|
scoreMeta.Scores,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the job such that the change cannot be applied in-place
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2019-12-03 14:14:49 +00:00
|
|
|
|
|
|
|
// Create and process a mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2019-12-03 14:14:49 +00:00
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
// Lookup the new allocations by JobID
|
|
|
|
out, err = h.State.AllocsByJob(ws, job.Namespace, job2.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
|
|
|
require.Equal(2, len(out))
|
|
|
|
|
|
|
|
// No new allocs have node-reschedule-penalty
|
|
|
|
for _, alloc := range out {
|
|
|
|
require.Nil(alloc.RescheduleTracker)
|
|
|
|
require.NotNil(alloc.Metrics)
|
|
|
|
for _, scoreMeta := range alloc.Metrics.ScoreMetaData {
|
|
|
|
if scoreMeta.NodeID != failedAlloc.NodeID {
|
|
|
|
require.Equal(0.0, scoreMeta.Scores["node-reschedule-penalty"],
|
|
|
|
"eval for updated job should not include node-reshedule-penalty: %v",
|
|
|
|
scoreMeta.Scores,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
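// The tests in this file construct the same pending evaluation by hand many
// times. A minimal helper sketch of that pattern (hypothetical: this helper is
// not part of the original tests and is only shown to document the shape of a
// node-update evaluation built from the structs already imported above):
func nodeUpdateEvalSketch(job *structs.Job, nodeID string) *structs.Evaluation {
	// Build a pending evaluation triggered by a node update for the given job.
	return &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      nodeID,
		Status:      structs.EvalStatusPending,
	}
}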
|
|
|
|
|
2017-04-15 03:54:30 +00:00
|
|
|
func TestServiceSched_JobDeregister_Purged(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-08-11 21:54:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2016-07-22 21:53:49 +00:00
|
|
|
for _, alloc := range allocs {
|
|
|
|
h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-11 21:54:21 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-11 21:54:21 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all nodes
|
2016-01-14 20:57:43 +00:00
|
|
|
if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) {
|
2017-04-15 03:54:30 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Ensure that the job field on the allocation is still populated
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.Job == nil {
|
|
|
|
t.Fatalf("bad: %#v", alloc)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no remaining allocations
|
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServiceSched_JobDeregister_Stopped(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-04-15 03:54:30 +00:00
|
|
|
h := NewHarness(t)
|
2018-06-13 17:46:39 +00:00
|
|
|
require := require.New(t)
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Stop = true
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-06-13 17:46:39 +00:00
|
|
|
|
|
|
|
// Create a summary where the queued allocs are set as we want to assert
|
|
|
|
// they get zeroed out.
|
|
|
|
summary := mock.JobSummary(job.ID)
|
|
|
|
web := summary.Summary["web"]
|
|
|
|
web.Queued = 2
|
|
|
|
require.NoError(h.State.UpsertJobSummary(h.NextIndex(), summary))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-04-15 03:54:30 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-04-15 03:54:30 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Ensure a single plan
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Len(h.Plans, 1)
|
2017-04-15 03:54:30 +00:00
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all nodes
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"], len(allocs))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(err)
|
2015-08-11 21:54:21 +00:00
|
|
|
|
2016-02-24 22:50:59 +00:00
|
|
|
// Ensure that the job field on the allocation is still populated
|
|
|
|
for _, alloc := range out {
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NotNil(alloc.Job)
|
2016-02-24 22:50:59 +00:00
|
|
|
}
|
|
|
|
|
2015-08-11 21:54:21 +00:00
|
|
|
// Ensure no remaining allocations
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Empty(out)
|
|
|
|
|
|
|
|
// Assert the job summary is cleared out
|
|
|
|
sout, err := h.State.JobSummaryByID(ws, job.Namespace, job.ID)
|
|
|
|
require.NoError(err)
|
|
|
|
require.NotNil(sout)
|
|
|
|
require.Contains(sout.Summary, "web")
|
|
|
|
webOut := sout.Summary["web"]
|
|
|
|
require.Zero(webOut.Queued)
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-08-11 21:54:21 +00:00
|
|
|
}
|
2015-08-14 01:51:08 +00:00
|
|
|
|
2016-07-28 00:49:53 +00:00
|
|
|
func TestServiceSched_NodeDown(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
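// Each case places a single alloc on a down node and asserts how the
// scheduler handles it: allocs already desired-stop or desired-evict are
// marked lost, running allocs are migrated, failed allocs are rescheduled,
// and allocs that are already terminal produce no plan at all.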
|
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
cases := []struct {
|
|
|
|
desired string
|
|
|
|
client string
|
|
|
|
migrate bool
|
|
|
|
reschedule bool
|
|
|
|
terminal bool
|
|
|
|
lost bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusStop,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
lost: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusPending,
|
|
|
|
migrate: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
migrate: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusLost,
|
|
|
|
terminal: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusComplete,
|
|
|
|
terminal: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusFailed,
|
|
|
|
reschedule: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusEvict,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
lost: true,
|
|
|
|
},
|
2016-07-28 00:49:53 +00:00
|
|
|
}
|
2016-08-04 18:24:17 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
for i, tc := range cases {
|
|
|
|
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Register a node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Status = structs.NodeStatusDown
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2019-06-06 19:50:23 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-02-21 18:58:04 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
alloc.DesiredStatus = tc.desired
|
|
|
|
alloc.ClientStatus = tc.client
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Mark for migration if necessary
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(tc.migrate)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
allocs := []*structs.Allocation{alloc}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-08-03 22:45:42 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-08-03 22:45:42 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
2019-06-06 19:50:23 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
if tc.terminal {
|
|
|
|
// No plan for terminal state allocs
|
|
|
|
require.Len(t, h.Plans, 0)
|
2019-06-06 19:50:23 +00:00
|
|
|
} else {
|
2020-01-06 20:56:31 +00:00
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
|
|
|
out := plan.NodeUpdate[node.ID]
|
|
|
|
require.Len(t, out, 1)
|
|
|
|
|
|
|
|
outAlloc := out[0]
|
|
|
|
if tc.migrate {
|
|
|
|
require.NotEqual(t, structs.AllocClientStatusLost, outAlloc.ClientStatus)
|
|
|
|
} else if tc.reschedule {
|
|
|
|
require.Equal(t, structs.AllocClientStatusFailed, outAlloc.ClientStatus)
|
|
|
|
} else if tc.lost {
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, outAlloc.ClientStatus)
|
|
|
|
} else {
|
|
|
|
require.Fail(t, "unexpected alloc update")
|
|
|
|
}
|
2019-06-06 19:50:23 +00:00
|
|
|
}
|
2020-01-06 20:56:31 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2019-06-06 19:50:23 +00:00
|
|
|
})
|
2016-07-28 00:49:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-13 20:39:04 +00:00
|
|
|
func TestServiceSched_StopAfterClientDisconnect(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
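// Cases: with no stop_after_client_disconnect the alloc is rescheduled
// immediately; with a 1s window and no recorded lost time it is not; if the
// client was marked lost more than 1s ago it is rescheduled; if the lost
// timestamp is in the future it is not.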
|
|
|
|
|
2020-05-13 20:39:04 +00:00
|
|
|
cases := []struct {
|
|
|
|
stop time.Duration
|
|
|
|
when time.Time
|
|
|
|
rescheduled bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
rescheduled: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
rescheduled: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
when: time.Now().UTC().Add(-10 * time.Second),
|
|
|
|
rescheduled: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
when: time.Now().UTC().Add(10 * time.Minute),
|
|
|
|
rescheduled: false,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, tc := range cases {
|
|
|
|
t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a node that is down
|
|
|
|
node := mock.Node()
|
|
|
|
node.Status = structs.NodeStatusDown
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2020-05-13 20:39:04 +00:00
|
|
|
|
|
|
|
// Job with allocations and stop_after_client_disconnect
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
|
|
|
job.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2020-05-13 20:39:04 +00:00
|
|
|
|
|
|
|
// Alloc for the running group
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusRun
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
if !tc.when.IsZero() {
|
|
|
|
alloc.AllocStates = []*structs.AllocState{{
|
|
|
|
Field: structs.AllocStateFieldClientStatus,
|
|
|
|
Value: structs.AllocClientStatusLost,
|
|
|
|
Time: tc.when,
|
|
|
|
}}
|
|
|
|
}
|
|
|
|
allocs := []*structs.Allocation{alloc}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2020-05-13 20:39:04 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
evals := []*structs.Evaluation{{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeDrain,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}}
|
|
|
|
eval := evals[0]
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), evals))
|
2020-05-13 20:39:04 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, h.Evals[0].Status, structs.EvalStatusComplete)
|
|
|
|
require.Len(t, h.Plans, 1, "plan")
|
|
|
|
|
2020-06-03 13:48:38 +00:00
|
|
|
// One followup eval created, either delayed or blocked
|
|
|
|
require.Len(t, h.CreateEvals, 1)
|
2020-05-13 20:39:04 +00:00
|
|
|
e := h.CreateEvals[0]
|
|
|
|
require.Equal(t, eval.ID, e.PreviousEval)
|
|
|
|
|
|
|
|
if tc.rescheduled {
|
|
|
|
require.Equal(t, "blocked", e.Status)
|
|
|
|
} else {
|
|
|
|
require.Equal(t, "pending", e.Status)
|
|
|
|
require.NotEmpty(t, e.WaitUntil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The follow-up eval may still be in flight to the state store, so wait for it
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
found, err := h.State.EvalByID(ws, e.ID)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if found == nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
alloc, err = h.State.AllocByID(ws, alloc.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Allocations have been transitioned to lost
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusStop, alloc.DesiredStatus)
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, alloc.ClientStatus)
|
|
|
|
// At least 1 alloc state is recorded, 2 if we manually set tc.when
|
|
|
|
require.NotEmpty(t, alloc.AllocStates)
|
|
|
|
|
|
|
|
if tc.rescheduled {
|
|
|
|
// Register a new node, leave it up, process the followup eval
|
|
|
|
node = mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2020-05-13 20:39:04 +00:00
|
|
|
require.NoError(t, h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return len(as) == 2, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
a2 := as[0]
|
|
|
|
if a2.ID == alloc.ID {
|
|
|
|
a2 = as[1]
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Equal(t, structs.AllocClientStatusPending, a2.ClientStatus)
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, a2.DesiredStatus)
|
|
|
|
require.Equal(t, node.ID, a2.NodeID)
|
|
|
|
|
|
|
|
// No blocked evals
|
|
|
|
require.Empty(t, h.ReblockEvals)
|
|
|
|
require.Len(t, h.CreateEvals, 1)
|
|
|
|
require.Equal(t, h.CreateEvals[0].ID, e.ID)
|
|
|
|
} else {
|
|
|
|
// No new alloc was created
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, as, 1)
|
|
|
|
old := as[0]
|
|
|
|
|
|
|
|
require.Equal(t, alloc.ID, old.ID)
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, old.ClientStatus)
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusStop, old.DesiredStatus)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-28 19:13:35 +00:00
|
|
|
func TestServiceSched_NodeUpdate(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-07-28 19:13:35 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Mark some allocs as running
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2016-07-28 19:13:35 +00:00
|
|
|
for i := 0; i < 4; i++ {
|
2017-02-08 05:22:48 +00:00
|
|
|
out, _ := h.State.AllocByID(ws, allocs[i].ID)
|
2016-07-28 19:13:35 +00:00
|
|
|
out.ClientStatus = structs.AllocClientStatusRunning
|
2020-10-02 20:13:49 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{out}))
|
2016-07-28 19:13:35 +00:00
|
|
|
}
|
|
|
|
|
2016-07-28 19:22:44 +00:00
|
|
|
// Create a mock evaluation which won't trigger any new placements
|
2016-07-28 19:13:35 +00:00
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-28 19:13:35 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-28 19:13:35 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-07-28 21:02:50 +00:00
|
|
|
if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
|
2016-07-28 19:13:35 +00:00
|
|
|
t.Fatalf("bad queued allocations: %v", h.Evals[0].QueuedAllocations)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2015-08-14 01:51:08 +00:00
|
|
|
func TestServiceSched_NodeDrain(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-08-14 05:11:32 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-08-14 05:11:32 +00:00
|
|
|
}
|
|
|
|
|
2016-02-03 22:15:02 +00:00
|
|
|
// Generate a fake job with allocations and an update policy.
|
2015-08-14 05:11:32 +00:00
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
2015-09-07 02:47:02 +00:00
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(true)
|
2015-08-14 05:11:32 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-14 05:11:32 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-14 05:11:32 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2015-08-26 00:06:06 +00:00
|
|
|
if len(plan.NodeUpdate[node.ID]) != len(allocs) {
|
2015-08-14 05:11:32 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2015-08-14 05:11:32 +00:00
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-08-09 21:48:25 +00:00
|
|
|
func TestServiceSched_NodeDrain_Down(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-08-09 21:48:25 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2016-08-09 21:48:25 +00:00
|
|
|
node.Status = structs.NodeStatusDown
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Set the desired state of the allocs to stop
|
|
|
|
var stop []*structs.Allocation
|
2018-02-21 18:58:04 +00:00
|
|
|
for i := 0; i < 6; i++ {
|
2016-08-09 21:48:25 +00:00
|
|
|
newAlloc := allocs[i].Copy()
|
|
|
|
newAlloc.ClientStatus = structs.AllocDesiredStatusStop
|
2022-08-17 16:26:34 +00:00
|
|
|
newAlloc.DesiredTransition.Migrate = pointer.Of(true)
|
2016-08-09 21:48:25 +00:00
|
|
|
stop = append(stop, newAlloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), stop))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Mark some of the allocations as running
|
|
|
|
var running []*structs.Allocation
|
|
|
|
for i := 4; i < 6; i++ {
|
|
|
|
newAlloc := stop[i].Copy()
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
running = append(running, newAlloc)
|
|
|
|
}
|
2020-10-04 19:12:35 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), running))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Mark some of the allocations as complete
|
|
|
|
var complete []*structs.Allocation
|
|
|
|
for i := 6; i < 10; i++ {
|
2018-02-21 18:58:04 +00:00
|
|
|
newAlloc := allocs[i].Copy()
|
2018-01-14 22:47:21 +00:00
|
|
|
newAlloc.TaskStates = make(map[string]*structs.TaskState)
|
|
|
|
newAlloc.TaskStates["web"] = &structs.TaskState{
|
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2016-08-09 21:48:25 +00:00
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
complete = append(complete, newAlloc)
|
|
|
|
}
|
2020-10-04 19:12:35 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(structs.MsgTypeTestSetup, h.NextIndex(), complete))
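// At this point allocs 0-5 are still non-terminal from the client's
// perspective on the down, draining node and should be evicted and marked
// lost, while allocs 6-9 are complete and should be left untouched.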
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the node update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-09 21:48:25 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-09 21:48:25 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-08-09 21:48:25 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted non terminal allocs
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 6 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that all the allocations which were in running or pending state
|
|
|
|
// have been marked as lost
|
|
|
|
var lostAllocs []string
|
|
|
|
for _, alloc := range plan.NodeUpdate[node.ID] {
|
|
|
|
lostAllocs = append(lostAllocs, alloc.ID)
|
|
|
|
}
|
|
|
|
sort.Strings(lostAllocs)
|
|
|
|
|
|
|
|
var expectedLostAllocs []string
|
|
|
|
for i := 0; i < 6; i++ {
|
|
|
|
expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID)
|
|
|
|
}
|
|
|
|
sort.Strings(expectedLostAllocs)
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-07-22 17:18:23 +00:00
|
|
|
func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-07-22 17:18:23 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(true)
|
2016-07-22 17:18:23 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
2021-02-11 15:40:59 +00:00
|
|
|
node.DrainStrategy = mock.DrainNode().DrainStrategy
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-22 17:18:23 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-22 17:18:23 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 2 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 2, queued)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-17 23:21:46 +00:00
|
|
|
// TestServiceSched_NodeDrain_TaskHandle asserts that allocations with task
|
|
|
|
// handles have them propagated to replacement allocations when drained.
|
|
|
|
func TestServiceSched_NodeDrain_TaskHandle(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-12-17 23:21:46 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
node := mock.Node()
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(true)
|
2020-12-17 23:21:46 +00:00
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
"web": {
|
2020-12-17 23:21:46 +00:00
|
|
|
TaskHandle: &structs.TaskHandle{
|
|
|
|
Version: 1,
|
|
|
|
DriverState: []byte("test-driver-state"),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
node.DrainStrategy = mock.DrainNode().DrainStrategy
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
|
|
|
require.Len(t, plan.NodeUpdate[node.ID], len(allocs))
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Len(t, planned, len(allocs))
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
|
|
|
require.Len(t, out, len(allocs))
|
|
|
|
|
|
|
|
// Ensure task states were propagated
|
|
|
|
for _, a := range out {
|
|
|
|
require.NotEmpty(t, a.TaskStates)
|
|
|
|
require.NotEmpty(t, a.TaskStates["web"])
|
|
|
|
require.NotNil(t, a.TaskStates["web"].TaskHandle)
|
|
|
|
assert.Equal(t, 1, a.TaskStates["web"].TaskHandle.Version)
|
|
|
|
assert.Equal(t, []byte("test-driver-state"), a.TaskStates["web"].TaskHandle.DriverState)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2015-08-15 21:47:13 +00:00
|
|
|
func TestServiceSched_RetryLimit(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2015-08-15 21:47:13 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
h.Planner = &RejectPlan{h}
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2015-08-15 21:47:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2015-08-15 21:47:13 +00:00
|
|
|
|
2015-10-14 23:43:06 +00:00
|
|
|
// Create a mock evaluation to register the job
|
2015-08-15 21:47:13 +00:00
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-15 21:47:13 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-15 21:47:13 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
// Ensure no allocations placed
|
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should hit the retry limit
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusFailed)
|
2015-08-14 01:51:08 +00:00
|
|
|
}
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
func TestServiceSched_Reschedule_OnceNow(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
2018-03-02 00:23:44 +00:00
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Minute,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Verify that one new allocation got created with its reschedule tracker info
|
|
|
|
assert := assert.New(t)
|
|
|
|
assert.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
2018-01-17 19:22:30 +00:00
|
|
|
assert.Equal(1, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
assert.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Mark this alloc as failed again, should not get rescheduled
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create another mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err = h.Process(NewServiceScheduler, eval)
|
|
|
|
assert.Nil(err)
|
|
|
|
// Verify no new allocs were created this time
|
|
|
|
out, err = h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
assert.Equal(3, len(out))
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
// Tests that an alloc that is reschedulable at a future time creates a follow-up eval
|
|
|
|
func TestServiceSched_Reschedule_Later(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
delayDuration := 15 * time.Second
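// A constant 15s delay means the failed alloc cannot be replaced immediately;
// the scheduler should instead create a follow-up eval whose WaitUntil is the
// failure time plus delayDuration.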
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: delayDuration,
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Minute,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now}}
|
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(err)
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Verify no new allocs were created
|
|
|
|
require.Equal(2, len(out))
|
|
|
|
|
|
|
|
// Verify follow up eval was created for the failed alloc
|
|
|
|
alloc, err := h.State.AllocByID(ws, failedAllocID)
|
|
|
|
require.Nil(err)
|
|
|
|
require.NotEmpty(alloc.FollowupEvalID)
|
|
|
|
|
|
|
|
// Ensure there is a follow up eval.
|
|
|
|
if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusPending {
|
|
|
|
t.Fatalf("bad: %#v", h.CreateEvals)
|
|
|
|
}
|
|
|
|
followupEval := h.CreateEvals[0]
|
|
|
|
require.Equal(now.Add(delayDuration), followupEval.WaitUntil)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServiceSched_Reschedule_MultipleNow(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
maxRestartAttempts := 3
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
2018-03-02 00:23:44 +00:00
|
|
|
Attempts: maxRestartAttempts,
|
|
|
|
Interval: 30 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
expectedNumAllocs := 3
|
|
|
|
expectedNumReschedTrackers := 1
|
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
failedAllocId := allocs[1].ID
|
|
|
|
failedNodeID := allocs[1].NodeID
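// Each iteration processes the eval, expects one additional replacement alloc
// carrying one more reschedule event, then fails that replacement, until the
// policy's Attempts are exhausted.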
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
assert := assert.New(t)
|
|
|
|
for i := 0; i < maxRestartAttempts; i++ {
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Verify that a new allocation got created with its reschedule tracker info
|
|
|
|
assert.Equal(expectedNumAllocs, len(out))
|
|
|
|
|
|
|
|
// Find the new alloc with ClientStatusPending
|
|
|
|
var pendingAllocs []*structs.Allocation
|
2018-01-24 20:56:57 +00:00
|
|
|
var prevFailedAlloc *structs.Allocation
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ClientStatus == structs.AllocClientStatusPending {
|
|
|
|
pendingAllocs = append(pendingAllocs, alloc)
|
|
|
|
}
|
2018-01-24 20:56:57 +00:00
|
|
|
if alloc.ID == failedAllocId {
|
|
|
|
prevFailedAlloc = alloc
|
|
|
|
}
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
assert.Equal(1, len(pendingAllocs))
|
|
|
|
newAlloc := pendingAllocs[0]
|
2018-01-17 19:22:30 +00:00
|
|
|
assert.Equal(expectedNumReschedTrackers, len(newAlloc.RescheduleTracker.Events))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
// Verify the previous NodeID in the most recent reschedule event
|
|
|
|
reschedEvents := newAlloc.RescheduleTracker.Events
|
|
|
|
assert.Equal(failedAllocId, reschedEvents[len(reschedEvents)-1].PrevAllocID)
|
|
|
|
assert.Equal(failedNodeID, reschedEvents[len(reschedEvents)-1].PrevNodeID)
|
|
|
|
|
2018-01-24 20:56:57 +00:00
|
|
|
// Verify that the next alloc of the failed alloc is the newly rescheduled alloc
|
|
|
|
assert.Equal(newAlloc.ID, prevFailedAlloc.NextAllocation)
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
// Mark this alloc as failed again
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
newAlloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-12 * time.Second),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
failedAllocId = newAlloc.ID
|
|
|
|
failedNodeID = newAlloc.NodeID
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{newAlloc}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create another mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
expectedNumAllocs += 1
|
|
|
|
expectedNumReschedTrackers += 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process last eval again, should not reschedule
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
assert.Nil(err)
|
|
|
|
|
|
|
|
// Verify no new allocs were created because reschedule attempts were exhausted
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
assert.Equal(5, len(out)) // 2 original, plus 3 reschedule attempts
|
|
|
|
}
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
// Tests that old reschedule attempts are pruned
|
|
|
|
func TestServiceSched_Reschedule_PruneEvents(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and a reschedule policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
DelayFunction: "exponential",
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Hour,
|
2018-03-02 00:23:44 +00:00
|
|
|
Delay: 5 * time.Second,
|
|
|
|
Unlimited: true,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
now := time.Now()
|
|
|
|
// Mark one allocation as failed, with its reschedule tracker info
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{job.TaskGroups[0].Name: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-15 * time.Minute)}}
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
|
|
|
allocs[1].RescheduleTracker = &structs.RescheduleTracker{
|
|
|
|
Events: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
|
|
|
|
PrevAllocID: uuid.Generate(),
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-40 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-30 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-20 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-10 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 80 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 160 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
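// The test expects the oldest (5s-delay) event to be pruned when the tracker
// is copied, so the new alloc's first event is the 10s one; with the
// exponential delay function the next delay doubles the last 160s entry,
// giving 320s.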
|
|
|
|
expectedFirstRescheduleEvent := allocs[1].RescheduleTracker.Events[1]
|
|
|
|
expectedDelay := 320 * time.Second
|
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a plan was created
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Verify that one new allocation got created with its reschedule tracker info
|
|
|
|
assert := assert.New(t)
|
|
|
|
assert.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
|
|
|
// Verify that the new alloc copied the last 5 reschedule attempts and appended the new one
|
|
|
|
assert.Equal(6, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
assert.Equal(expectedFirstRescheduleEvent, newAlloc.RescheduleTracker.Events[0])
|
|
|
|
|
|
|
|
mostRecentRescheduleEvent := newAlloc.RescheduleTracker.Events[5]
|
|
|
|
// Verify that the failed alloc ID is in the most recent reschedule event
|
|
|
|
assert.Equal(failedAllocID, mostRecentRescheduleEvent.PrevAllocID)
|
|
|
|
// Verify that the delay value was captured correctly
|
|
|
|
assert.Equal(expectedDelay, mostRecentRescheduleEvent.Delay)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
// Tests that deployments with failed allocs result in placements as long as the
|
|
|
|
// deployment is running.
|
|
|
|
func TestDeployment_FailedAllocs_Reschedule(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
for _, failedDeployment := range []bool{false, true} {
|
|
|
|
t.Run(fmt.Sprintf("Failed Deployment: %v", failedDeployment), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-04-23 23:35:25 +00:00
|
|
|
}
|
2018-02-02 23:22:37 +00:00
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
// Generate a fake job with allocations and a reschedule policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
}
|
|
|
|
jobIndex := h.NextIndex()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.Nil(h.State.UpsertJob(structs.MsgTypeTestSetup, jobIndex, job))
|
2018-04-23 23:35:25 +00:00
|
|
|
|
|
|
|
deployment := mock.Deployment()
|
|
|
|
deployment.JobID = job.ID
|
|
|
|
deployment.JobCreateIndex = jobIndex
|
|
|
|
deployment.JobVersion = job.Version
|
|
|
|
if failedDeployment {
|
|
|
|
deployment.Status = structs.DeploymentStatusFailed
|
|
|
|
}
|
2018-02-02 23:22:37 +00:00
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DeploymentID = deployment.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed in the past
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{"web": {State: "start",
|
|
|
|
StartedAt: time.Now().Add(-12 * time.Hour),
|
|
|
|
FinishedAt: time.Now().Add(-10 * time.Hour)}}
|
2022-08-17 16:26:34 +00:00
|
|
|
allocs[1].DesiredTransition.Reschedule = pointer.Of(true)
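// Marking the alloc for reschedule should only yield a placement while the
// deployment is still running; the failed-deployment case below expects no plan.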
|
2018-04-23 23:35:25 +00:00
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.Nil(h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-04-23 23:35:25 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.Nil(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-04-23 23:35:25 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
require.Nil(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
if failedDeployment {
|
|
|
|
// Verify no plan created
|
|
|
|
require.Len(h.Plans, 0)
|
|
|
|
} else {
|
|
|
|
require.Len(h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
2018-02-02 23:22:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-12 23:14:32 +00:00
|
|
|
func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-02-03 01:19:41 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2016-04-12 23:14:32 +00:00
|
|
|
// Create a complete alloc
|
2016-02-03 01:19:41 +00:00
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2016-03-24 01:08:19 +00:00
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-02-03 01:19:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-02-03 01:19:41 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan as it should be a no-op
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Ensure no allocations placed
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBatchSched_Run_FailedAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-02-03 01:19:41 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
// Create a failed alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-02-03 01:19:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-02-03 01:19:41 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-03-21 21:17:37 +00:00
|
|
|
// Ensure a plan
|
2016-02-03 01:19:41 +00:00
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Ensure a replacement alloc was placed.
|
|
|
|
if len(out) != 2 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
// Ensure that the scheduler is recording the correct number of queued
|
|
|
|
// allocations
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 0 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 1, queued)
|
|
|
|
}
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2016-05-25 00:47:03 +00:00
|
|
|
|
2018-01-04 22:20:32 +00:00
|
|
|
func TestBatchSched_Run_LostAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2018-01-04 22:20:32 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.ID = "my-job"
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 3
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Desired = 3
|
|
|
|
// Mark one as lost and then schedule
|
|
|
|
// [(0, run, running), (1, run, running), (1, stop, lost)]
|
|
|
|
|
|
|
|
// Create two running allocations
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i <= 1; i++ {
|
2022-01-15 01:09:14 +00:00
|
|
|
alloc := mock.AllocForNodeWithoutReservedPort(node)
|
2018-01-04 22:20:32 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a stopped, terminal alloc representing the lost instance
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[1]"
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Ensure a replacement alloc was placed.
|
|
|
|
if len(out) != 4 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert that we have the correct number of each alloc name
|
|
|
|
expected := map[string]int{
|
|
|
|
"my-job.web[0]": 1,
|
|
|
|
"my-job.web[1]": 2,
|
|
|
|
"my-job.web[2]": 1,
|
|
|
|
}
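// my-job.web[1] appears twice: the stopped original plus its replacement.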
|
|
|
|
actual := make(map[string]int, 3)
|
|
|
|
for _, alloc := range out {
|
|
|
|
actual[alloc.Name] += 1
|
|
|
|
}
|
|
|
|
require.Equal(t, actual, expected)
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-07-22 19:06:03 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
// Create a failed alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-22 19:06:03 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-22 19:06:03 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that the scheduler is recording the correct number of queued
|
|
|
|
// allocations
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 1 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 1, queued)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-25 00:47:03 +00:00
|
|
|
func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-05-25 00:47:03 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a successfully finished
|
|
|
|
// alloc and a fresh undrained one
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2016-05-25 00:47:03 +00:00
|
|
|
node2 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a successful alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{
|
2017-09-26 22:26:33 +00:00
|
|
|
"web": {
|
2016-05-25 00:47:03 +00:00
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
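// A batch alloc that already exited successfully should not be rescheduled,
// so the evaluation below is expected to produce no plan.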
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to rerun the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-05-25 00:47:03 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-05-25 00:47:03 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Ensure no replacement alloc was placed.
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// This test checks that terminal allocations that receive an in-place update
|
|
|
|
// are not added to the plan
|
|
|
|
func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a mock evaluation to trigger the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test ensures that terminal allocations from older job versions are ignored.
|
|
|
|
func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-09-14 21:00:33 +00:00
|
|
|
job2.Type = structs.JobTypeBatch
|
|
|
|
job2.Version++
|
|
|
|
job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
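// The terminal allocs recreated below belong to the updated job version, so
// the scheduler is expected to produce no plan for them.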
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
allocs = nil
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job2
|
|
|
|
alloc.JobID = job2.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{
|
2017-09-26 22:26:33 +00:00
|
|
|
"web": {
|
2017-09-14 21:00:33 +00:00
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Create a mock evaluation to trigger the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-03-12 01:19:22 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test asserts that an allocation from an old job that is running on a
|
|
|
|
// drained node is cleaned up.
|
|
|
|
func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a running alloc from an
|
|
|
|
// old job version, and a fresh undrained one
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2017-09-14 21:00:33 +00:00
|
|
|
node2 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a running alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create an update job
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
|
2018-02-21 18:58:04 +00:00
|
|
|
job2.Version++
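// The running alloc was placed for the old job version on the draining node;
// the plan below is expected to evict it and place a replacement on node2.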
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted 1
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan places 1
|
|
|
|
if len(plan.NodeAllocation[node2.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test asserts that an allocation from a job that is complete on a
|
|
|
|
// drained node is ignored.
|
|
|
|
func TestBatchSched_NodeDrain_Complete(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a successfully finished
|
|
|
|
// alloc and a fresh undrained one
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2017-09-14 21:00:33 +00:00
|
|
|
node2 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a complete alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
2018-01-04 22:20:32 +00:00
|
|
|
alloc.TaskStates = make(map[string]*structs.TaskState)
|
|
|
|
alloc.TaskStates["web"] = &structs.TaskState{
|
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
2018-01-04 22:45:15 +00:00
|
|
|
{
|
2018-01-04 22:20:32 +00:00
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// This is a slightly odd test but it ensures that we handle a scale down of a
|
|
|
|
// task group's count and that it works even if all the allocs have the same
|
|
|
|
// name.
|
|
|
|
func TestBatchSched_ScaleDown_SameName(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
scoreMetric := &structs.AllocMetric{
|
|
|
|
NodesEvaluated: 10,
|
|
|
|
NodesFiltered: 3,
|
|
|
|
ScoreMetaData: []*structs.NodeScoreMeta{
|
|
|
|
{
|
|
|
|
NodeID: node.ID,
|
|
|
|
Scores: map[string]float64{
|
|
|
|
"bin-packing": 0.5435,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
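// The running allocs below carry these metrics so the test can assert that an
// in-place update does not overwrite the original scoring data.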
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a few running alloc
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
2022-01-15 01:09:14 +00:00
|
|
|
alloc := mock.AllocForNodeWithoutReservedPort(node)
|
2017-09-14 21:00:33 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
2019-03-13 04:36:46 +00:00
|
|
|
alloc.Metrics = scoreMetric
|
2017-09-14 21:00:33 +00:00
|
|
|
allocs = append(allocs, alloc)
|
2016-07-27 18:54:55 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
// Update the job's modify index to force an in-place upgrade
|
|
|
|
updatedJob := job.Copy()
|
|
|
|
updatedJob.JobModifyIndex = job.JobModifyIndex + 1
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), updatedJob))
|
2019-03-13 04:36:46 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-08-30 22:36:30 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
2016-08-30 22:36:30 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
require := require.New(t)
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure the plan evicted 4 of the 5
|
2019-03-13 04:36:46 +00:00
|
|
|
require.Equal(4, len(plan.NodeUpdate[node.ID]))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
// Ensure that the scheduler did not overwrite the original score metrics for the in-place updated allocs
|
|
|
|
for _, inPlaceAllocs := range plan.NodeAllocation {
|
|
|
|
for _, alloc := range inPlaceAllocs {
|
|
|
|
require.Equal(scoreMetric, alloc.Metrics)
|
|
|
|
}
|
|
|
|
}
|
2017-09-14 21:00:33 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2016-07-27 18:54:55 +00:00
|
|
|
}
|
2016-08-16 00:52:41 +00:00
|
|
|
|
2021-03-26 20:01:27 +00:00
|
|
|
func TestGenericSched_AllocFit_Lifecycle(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
testCases := []struct {
|
|
|
|
Name string
|
|
|
|
NodeCpu int64
|
|
|
|
TaskResources structs.Resources
|
|
|
|
MainTaskCount int
|
|
|
|
InitTaskCount int
|
|
|
|
SideTaskCount int
|
|
|
|
ShouldPlaceAlloc bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
Name: "simple init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 500,
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
MainTaskCount: 1,
|
|
|
|
InitTaskCount: 1,
|
|
|
|
SideTaskCount: 1,
|
|
|
|
ShouldPlaceAlloc: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "too big init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 700,
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
MainTaskCount: 1,
|
|
|
|
InitTaskCount: 1,
|
|
|
|
SideTaskCount: 1,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "many init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 100,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 3,
|
|
|
|
InitTaskCount: 5,
|
|
|
|
SideTaskCount: 5,
|
|
|
|
ShouldPlaceAlloc: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "too many init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 100,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 10,
|
|
|
|
InitTaskCount: 10,
|
|
|
|
SideTaskCount: 10,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
2020-01-09 23:43:00 +00:00
|
|
|
{
|
|
|
|
Name: "too many too big",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 1000,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 10,
|
|
|
|
InitTaskCount: 10,
|
|
|
|
SideTaskCount: 10,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
2020-01-07 19:48:05 +00:00
|
|
|
}
|
2020-01-09 19:34:46 +00:00
|
|
|
for _, testCase := range testCases {
|
|
|
|
t.Run(testCase.Name, func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
node := mock.Node()
|
|
|
|
node.NodeResources.Cpu.CpuShares = testCase.NodeCpu
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Create a job with sidecar & init tasks
|
|
|
|
job := mock.VariableLifecycleJob(testCase.TaskResources, testCase.MainTaskCount, testCase.InitTaskCount, testCase.SideTaskCount)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
allocs := 0
|
|
|
|
if testCase.ShouldPlaceAlloc {
|
|
|
|
allocs = 1
|
|
|
|
}
|
|
|
|
// Ensure a plan only when the alloc is expected to fit
|
|
|
|
require.Len(t, h.Plans, allocs)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure the expected number of allocations were placed
|
|
|
|
require.Len(t, out, allocs)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
})
|
|
|
|
}
|
2020-01-07 19:48:05 +00:00
|
|
|
}
|
|
|
|
|
2021-03-26 20:01:27 +00:00
|
|
|
func TestGenericSched_AllocFit_MemoryOversubscription(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2021-03-26 20:01:27 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
node := mock.Node()
|
|
|
|
node.NodeResources.Cpu.CpuShares = 10000
|
|
|
|
node.NodeResources.Memory.MemoryMB = 1224
|
|
|
|
node.ReservedResources.Memory.MemoryMB = 60
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 10
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.CPU = 100
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.MemoryMB = 200
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.MemoryMaxMB = 500
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.DiskMB = 1
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// expectedAllocs should be floor((nodeResources.MemoryMB-reservedResources.MemoryMB) / job.MemoryMB)
|
|
|
|
expectedAllocs := 5
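// (1224 - 60) / 200 = 5.82, so only 5 allocs fit; placement is based on the
// reserved MemoryMB, not the oversubscribed MemoryMaxMB.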
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, out, expectedAllocs)
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-08-16 00:52:41 +00:00
|
|
|
func TestGenericSched_ChainedAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-08-16 00:52:41 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2016-08-16 00:52:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-16 00:52:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-08-16 00:52:41 +00:00
|
|
|
// Process the evaluation
|
|
|
|
if err := h.Process(NewServiceScheduler, eval); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var allocIDs []string
|
|
|
|
for _, allocList := range h.Plans[0].NodeAllocation {
|
|
|
|
for _, alloc := range allocList {
|
|
|
|
allocIDs = append(allocIDs, alloc.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(allocIDs)
|
|
|
|
|
|
|
|
// Create a new harness to invoke the scheduler again
|
|
|
|
h1 := NewHarnessWithState(t, h.State)
|
|
|
|
job1 := mock.Job()
|
|
|
|
job1.ID = job.ID
|
|
|
|
job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
|
|
|
|
job1.TaskGroups[0].Count = 12
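// Bumping the count from 10 to 12 means two placements have no previous
// allocation to chain from.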
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h1.State.UpsertJob(structs.MsgTypeTestSetup, h1.NextIndex(), job1))
|
2016-08-16 00:52:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to update the job
|
|
|
|
eval1 := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-16 00:52:41 +00:00
|
|
|
Priority: job1.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job1.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-08-16 00:52:41 +00:00
|
|
|
// Process the evaluation
|
|
|
|
if err := h1.Process(NewServiceScheduler, eval1); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h1.Plans[0]
|
|
|
|
|
|
|
|
// Collect all the chained allocation ids and the new allocations which
|
|
|
|
// don't have any chained allocations
|
|
|
|
var prevAllocs []string
|
|
|
|
var newAllocs []string
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
for _, alloc := range allocList {
|
|
|
|
if alloc.PreviousAllocation == "" {
|
|
|
|
newAllocs = append(newAllocs, alloc.ID)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(prevAllocs)
|
|
|
|
|
2018-01-24 20:56:57 +00:00
|
|
|
// Ensure that the new allocations have their corresponding original
|
2016-08-16 00:52:41 +00:00
|
|
|
// allocation ids
|
|
|
|
if !reflect.DeepEqual(prevAllocs, allocIDs) {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the two new allocations don't have any chained allocations
|
|
|
|
if len(newAllocs) != 2 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
|
|
|
|
}
|
|
|
|
}
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2016-09-24 04:15:50 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
2021-02-11 15:40:59 +00:00
|
|
|
node := mock.DrainNode()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
// Create an alloc on the draining node
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Job.TaskGroups[0].Count = 1
|
|
|
|
alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
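// With a sticky ephemeral disk the drained alloc should be evicted without a
// replacement placement in this plan.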
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(true)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), alloc.Job))
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-09-24 04:15:50 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: alloc.Job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-09-24 04:15:50 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-09-24 04:15:50 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't create any new allocations
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// This test ensures that when a job is stopped, the scheduler properly cancels
|
|
|
|
// an outstanding deployment.
|
|
|
|
func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job
|
|
|
|
job := mock.Job()
|
|
|
|
job.JobModifyIndex = job.CreateIndex + 1
|
|
|
|
job.ModifyIndex = job.CreateIndex + 1
|
|
|
|
job.Stop = true
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a deployment
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
|
|
|
d.JobCreateIndex = job.CreateIndex
|
|
|
|
d.JobModifyIndex = job.JobModifyIndex - 1
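// The deployment predates the stopped job version, so processing the
// deregister eval below is expected to cancel it.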
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-05-18 19:36:04 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-05-18 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan cancelled the existing deployment
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("No deployment for job")
|
|
|
|
}
|
|
|
|
if out.ID != d.ID {
|
|
|
|
t.Fatalf("Latest deployment for job is different than original deployment")
|
|
|
|
}
|
|
|
|
if out.Status != structs.DeploymentStatusCancelled {
|
|
|
|
t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
|
|
|
|
}
|
|
|
|
if out.StatusDescription != structs.DeploymentStatusDescriptionStoppedJob {
|
|
|
|
t.Fatalf("Deployment status description is %q, want %q",
|
|
|
|
out.StatusDescription, structs.DeploymentStatusDescriptionStoppedJob)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't allocate anything
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test ensures that when a job is updated and has an old deployment, the scheduler properly cancels
|
|
|
|
// the deployment.
|
|
|
|
func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job
|
|
|
|
job := mock.Job()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a deployment for an old version of the job
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Upsert again to bump job version
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to kick the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-05-18 19:36:04 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-05-18 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan cancelled the existing deployment
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("No deployment for job")
|
|
|
|
}
|
|
|
|
if out.ID != d.ID {
|
|
|
|
t.Fatalf("Latest deployment for job is different than original deployment")
|
|
|
|
}
|
|
|
|
if out.Status != structs.DeploymentStatusCancelled {
|
|
|
|
t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
|
|
|
|
}
|
|
|
|
if out.StatusDescription != structs.DeploymentStatusDescriptionNewerJob {
|
|
|
|
t.Fatalf("Deployment status description is %q, want %q",
|
|
|
|
out.StatusDescription, structs.DeploymentStatusDescriptionNewerJob)
|
|
|
|
}
|
|
|
|
// Ensure the plan didn't allocate anything
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Various table-driven tests for carry forward
|
|
|
|
// of past reschedule events
|
|
|
|
func Test_updateRescheduleTracker(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
t1 := time.Now().UTC()
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
prevAlloc := mock.Alloc()
|
|
|
|
|
|
|
|
type testCase struct {
|
|
|
|
desc string
|
|
|
|
prevAllocEvents []*structs.RescheduleEvent
|
|
|
|
reschedPolicy *structs.ReschedulePolicy
|
|
|
|
expectedRescheduleEvents []*structs.RescheduleEvent
|
|
|
|
reschedTime time.Time
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := []testCase{
|
|
|
|
{
|
2018-09-04 23:03:52 +00:00
|
|
|
desc: "No past events",
|
|
|
|
prevAllocEvents: nil,
|
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
|
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-02 00:23:44 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "one past event, linear delay",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second}},
|
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
|
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "one past event, fibonacci delay",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second}},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second, DelayFunction: "fibonacci", MaxDelay: 60 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "eight past events, fibonacci delay, unlimited",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 15 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 25 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 65 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 105 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "fibonacci", MaxDelay: 240 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 15 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 25 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 65 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 105 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 170 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: " old attempts past interval, exponential delay, limited",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-2 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
2018-03-08 00:44:54 +00:00
|
|
|
RescheduleTime: t1.Add(-70 * time.Minute).UnixNano(),
|
2018-03-02 00:23:44 +00:00
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-30 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-10 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 1 * time.Hour, Attempts: 5, Delay: 5 * time.Second, DelayFunction: "exponential", MaxDelay: 240 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-30 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-10 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 80 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range testCases {
|
|
|
|
t.Run(tc.desc, func(t *testing.T) {
|
|
|
|
require := require.New(t)
|
|
|
|
prevAlloc.RescheduleTracker = &structs.RescheduleTracker{Events: tc.prevAllocEvents}
|
2018-03-08 15:36:01 +00:00
|
|
|
prevAlloc.Job.LookupTaskGroup(prevAlloc.TaskGroup).ReschedulePolicy = tc.reschedPolicy
|
|
|
|
updateRescheduleTracker(alloc, prevAlloc, tc.reschedTime)
|
2018-03-02 00:23:44 +00:00
|
|
|
require.Equal(tc.expectedRescheduleEvents, alloc.RescheduleTracker.Events)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
}
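
// The "fibonacci" expectations in the cases above follow a simple recurrence:
// each new delay is the sum of the two previous delays (5s, 5s, 10s, 15s, 25s,
// 40s, 65s, 105s, 170s, ...), capped at the policy's MaxDelay. Below is a
// minimal, hypothetical sketch of that progression, just to illustrate the
// values the test expects; it is not the scheduler's real delay code.
func exampleFibonacciDelay(prevDelays []time.Duration, base, max time.Duration) time.Duration {
	// With fewer than two prior reschedule events the delay stays at the policy base.
	if len(prevDelays) < 2 {
		return base
	}
	// Otherwise sum the last two delays, e.g. 65s + 105s = 170s.
	next := prevDelays[len(prevDelays)-1] + prevDelays[len(prevDelays)-2]
	if next > max {
		next = max
	}
	return next
}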
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
func TestServiceSched_Preemption(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-05-27 19:02:01 +00:00
|
|
|
require := require.New(t)
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Resources = nil
|
|
|
|
node.ReservedResources = nil
|
|
|
|
node.NodeResources = &structs.NodeResources{
|
|
|
|
Cpu: structs.NodeCpuResources{
|
|
|
|
CpuShares: 1000,
|
|
|
|
},
|
|
|
|
Memory: structs.NodeMemoryResources{
|
|
|
|
MemoryMB: 2048,
|
|
|
|
},
|
|
|
|
Disk: structs.NodeDiskResources{
|
|
|
|
DiskMB: 100 * 1024,
|
|
|
|
},
|
|
|
|
Networks: []*structs.NetworkResource{
|
|
|
|
{
|
2020-06-17 18:01:17 +00:00
|
|
|
Mode: "host",
|
2020-05-27 19:02:01 +00:00
|
|
|
Device: "eth0",
|
|
|
|
CIDR: "192.168.0.100/32",
|
|
|
|
MBits: 1000,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
node.ReservedResources = &structs.NodeReservedResources{
|
|
|
|
Cpu: structs.NodeReservedCpuResources{
|
|
|
|
CpuShares: 50,
|
|
|
|
},
|
|
|
|
Memory: structs.NodeReservedMemoryResources{
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
Disk: structs.NodeReservedDiskResources{
|
|
|
|
DiskMB: 4 * 1024,
|
|
|
|
},
|
|
|
|
Networks: structs.NodeReservedNetworkResources{
|
|
|
|
ReservedHostPorts: "22",
|
|
|
|
},
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
// Create a couple of jobs and schedule them
|
|
|
|
job1 := mock.Job()
|
|
|
|
job1.TaskGroups[0].Count = 1
|
2020-08-29 01:40:53 +00:00
|
|
|
job1.TaskGroups[0].Networks = nil
|
2020-05-27 19:02:01 +00:00
|
|
|
job1.Priority = 30
|
|
|
|
r1 := job1.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r1.CPU = 500
|
|
|
|
r1.MemoryMB = 1024
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job1))
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.TaskGroups[0].Count = 1
|
2020-08-29 01:40:53 +00:00
|
|
|
job2.TaskGroups[0].Networks = nil
|
2020-05-27 19:02:01 +00:00
|
|
|
job2.Priority = 50
|
|
|
|
r2 := job2.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r2.CPU = 350
|
|
|
|
r2.MemoryMB = 512
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the jobs
|
|
|
|
eval1 := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job1.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job1.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
eval2 := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job2.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job2.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval1, eval2}))
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
expectedPreemptedAllocs := make(map[string]struct{})
|
|
|
|
// Process the two evals for job1 and job2 and make sure they allocated
|
|
|
|
for index, eval := range []*structs.Evaluation{eval1, eval2} {
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
plan := h.Plans[index]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
require.Nil(plan.Annotations)
|
|
|
|
|
|
|
|
// Ensure the eval has no spawned blocked eval
|
|
|
|
require.Equal(0, len(h.CreateEvals))
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Equal(1, len(planned))
|
|
|
|
expectedPreemptedAllocs[planned[0].ID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a higher priority job
|
|
|
|
job3 := mock.Job()
|
|
|
|
job3.Priority = 100
|
|
|
|
job3.TaskGroups[0].Count = 1
|
2020-08-29 01:40:53 +00:00
|
|
|
job3.TaskGroups[0].Networks = nil
|
2020-05-27 19:02:01 +00:00
|
|
|
r3 := job3.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r3.CPU = 900
|
|
|
|
r3.MemoryMB = 1700
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job3))
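
// job3 cannot fit alongside either existing allocation: the node exposes
// 1000 MHz CPU and 2048 MB memory, minus 50 MHz / 256 MB reserved, leaving
// roughly 950 MHz / 1792 MB. With job3 asking for 900 MHz / 1700 MB, both the
// job1 (500/1024) and job2 (350/512) allocs must be preempted.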
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job3.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job3.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
// New plan should be the third one in the harness
|
|
|
|
plan := h.Plans[2]
|
|
|
|
|
|
|
|
// Ensure the eval has no spawned blocked eval
|
|
|
|
require.Equal(0, len(h.CreateEvals))
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Equal(1, len(planned))
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job3.Namespace, job3.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
require.Equal(1, len(out))
|
|
|
|
actualPreemptedAllocs := make(map[string]struct{})
|
|
|
|
for _, id := range out[0].PreemptedAllocations {
|
|
|
|
actualPreemptedAllocs[id] = struct{}{}
|
|
|
|
}
|
|
|
|
require.Equal(expectedPreemptedAllocs, actualPreemptedAllocs)
|
|
|
|
}
|
2020-08-13 13:35:09 +00:00
|
|
|
|
2020-09-10 18:18:55 +00:00
|
|
|
// TestServiceSched_Migrate_NonCanary asserts that when rescheduling
|
|
|
|
// non-canary allocations, a single allocation is migrated
|
|
|
|
func TestServiceSched_Migrate_NonCanary(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-09-10 18:18:55 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
node1 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1))
|
2020-09-10 18:18:55 +00:00
|
|
|
|
|
|
|
job := mock.Job()
|
|
|
|
job.Stable = true
|
|
|
|
job.TaskGroups[0].Count = 1
|
|
|
|
job.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: 1,
|
|
|
|
Canary: 1,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2020-09-10 18:18:55 +00:00
|
|
|
|
|
|
|
deployment := &structs.Deployment{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
JobID: job.ID,
|
|
|
|
Namespace: job.Namespace,
|
|
|
|
JobVersion: job.Version,
|
|
|
|
JobModifyIndex: job.JobModifyIndex,
|
|
|
|
JobCreateIndex: job.CreateIndex,
|
|
|
|
TaskGroups: map[string]*structs.DeploymentState{
|
|
|
|
"web": {DesiredTotal: 1},
|
|
|
|
},
|
|
|
|
Status: structs.DeploymentStatusSuccessful,
|
|
|
|
StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment))
|
|
|
|
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node1.ID
|
|
|
|
alloc.DeploymentID = deployment.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusRun
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
2022-08-17 16:26:34 +00:00
|
|
|
alloc.DesiredTransition.Migrate = pointer.Of(true)
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Allocation{alloc}))
|
2020-09-10 18:18:55 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerAllocStop,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-09-10 18:18:55 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
require.Contains(t, plan.NodeAllocation, node1.ID)
|
|
|
|
allocs := plan.NodeAllocation[node1.ID]
|
|
|
|
require.Len(t, allocs, 1)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-08-13 13:35:09 +00:00
|
|
|
// TestServiceSched_Migrate_CanaryStatus asserts that migrations/rescheduling
|
|
|
|
// of allocations use the proper versions of allocs rather than latest:
|
|
|
|
// Canaries should be replaced by canaries, and non-canaries should be replaced
|
|
|
|
// with the latest promoted version.
|
|
|
|
func TestServiceSched_Migrate_CanaryStatus(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-08-13 13:35:09 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
node1 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
totalCount := 3
|
|
|
|
desiredCanaries := 1
|
|
|
|
|
|
|
|
job := mock.Job()
|
|
|
|
job.Stable = true
|
|
|
|
job.TaskGroups[0].Count = totalCount
|
|
|
|
job.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: 1,
|
|
|
|
Canary: desiredCanaries,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
deployment := &structs.Deployment{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
JobID: job.ID,
|
|
|
|
Namespace: job.Namespace,
|
|
|
|
JobVersion: job.Version,
|
|
|
|
JobModifyIndex: job.JobModifyIndex,
|
|
|
|
JobCreateIndex: job.CreateIndex,
|
|
|
|
TaskGroups: map[string]*structs.DeploymentState{
|
|
|
|
"web": {DesiredTotal: totalCount},
|
|
|
|
},
|
|
|
|
Status: structs.DeploymentStatusSuccessful,
|
|
|
|
StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 3; i++ {
|
2022-01-15 01:09:14 +00:00
|
|
|
alloc := mock.AllocForNodeWithoutReservedPort(node1)
|
2020-08-13 13:35:09 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.DeploymentID = deployment.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
// new job version with an updated task config
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.Stable = false
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure a deployment was created
|
|
|
|
require.NotNil(t, plan.Deployment)
|
|
|
|
updateDeployment := plan.Deployment.ID
|
|
|
|
|
|
|
|
// Check status first - should be 4 allocs, only one is canary
|
|
|
|
{
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
allocs, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, true)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Len(t, allocs, 4)
|
|
|
|
|
|
|
|
sort.Slice(allocs, func(i, j int) bool { return allocs[i].CreateIndex < allocs[j].CreateIndex })
|
|
|
|
|
|
|
|
for _, a := range allocs[:3] {
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, a.DesiredStatus)
|
|
|
|
require.Equal(t, uint64(0), a.Job.Version)
|
|
|
|
require.False(t, a.DeploymentStatus.IsCanary())
|
|
|
|
require.Equal(t, node1.ID, a.NodeID)
|
|
|
|
require.Equal(t, deployment.ID, a.DeploymentID)
|
|
|
|
}
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, allocs[3].DesiredStatus)
|
|
|
|
require.Equal(t, uint64(1), allocs[3].Job.Version)
|
|
|
|
require.True(t, allocs[3].DeploymentStatus.Canary)
|
|
|
|
require.Equal(t, node1.ID, allocs[3].NodeID)
|
|
|
|
require.Equal(t, updateDeployment, allocs[3].DeploymentID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// now, mark node1 down and ensure all allocs are migrated to node2
|
|
|
|
node1 = node1.Copy()
|
|
|
|
node1.Status = structs.NodeStatusDown
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
node2 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node2))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
neval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
NodeID: node1.ID,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{neval}))
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err = h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Now test that all node1 allocs are migrated while preserving Version and Canary info
|
|
|
|
{
|
2020-09-10 16:44:24 +00:00
|
|
|
// FIXME: This is a bug, we ought to reschedule canaries in this case but don't
|
|
|
|
rescheduleCanary := false
|
|
|
|
|
|
|
|
expectedMigrations := 3
|
|
|
|
if rescheduleCanary {
|
|
|
|
expectedMigrations++
|
|
|
|
}
|
|
|
|
|
2020-08-13 13:35:09 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
allocs, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, true)
|
|
|
|
require.NoError(t, err)
|
2020-09-10 16:44:24 +00:00
|
|
|
require.Len(t, allocs, 4+expectedMigrations)
|
2020-08-13 13:35:09 +00:00
|
|
|
|
|
|
|
nodeAllocs := map[string][]*structs.Allocation{}
|
|
|
|
for _, a := range allocs {
|
|
|
|
nodeAllocs[a.NodeID] = append(nodeAllocs[a.NodeID], a)
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Len(t, nodeAllocs[node1.ID], 4)
|
|
|
|
for _, a := range nodeAllocs[node1.ID] {
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusStop, a.DesiredStatus)
|
|
|
|
require.Equal(t, node1.ID, a.NodeID)
|
|
|
|
}
|
|
|
|
|
|
|
|
node2Allocs := nodeAllocs[node2.ID]
|
2020-09-10 16:44:24 +00:00
|
|
|
require.Len(t, node2Allocs, expectedMigrations)
|
2020-08-13 13:35:09 +00:00
|
|
|
sort.Slice(node2Allocs, func(i, j int) bool { return node2Allocs[i].Job.Version < node2Allocs[j].Job.Version })
|
|
|
|
|
|
|
|
for _, a := range node2Allocs[:3] {
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, a.DesiredStatus)
|
|
|
|
require.Equal(t, uint64(0), a.Job.Version)
|
|
|
|
require.Equal(t, node2.ID, a.NodeID)
|
|
|
|
require.Equal(t, deployment.ID, a.DeploymentID)
|
|
|
|
}
|
2020-09-10 16:44:24 +00:00
|
|
|
if rescheduleCanary {
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, node2Allocs[3].DesiredStatus)
|
|
|
|
require.Equal(t, uint64(1), node2Allocs[3].Job.Version)
|
|
|
|
require.Equal(t, node2.ID, node2Allocs[3].NodeID)
|
|
|
|
require.Equal(t, updateDeployment, node2Allocs[3].DeploymentID)
|
|
|
|
}
|
2020-08-13 13:35:09 +00:00
|
|
|
}
|
|
|
|
}
|
2020-09-14 21:12:53 +00:00
|
|
|
|
2020-12-08 17:57:47 +00:00
|
|
|
// TestDowngradedJobForPlacement_PicksTheLatest asserts that downgradedJobForPlacement
|
|
|
|
// picks the latest deployment that has either been marked as promoted or is considered
|
|
|
|
// non-destructive so it doesn't use canaries.
|
|
|
|
func TestDowngradedJobForPlacement_PicksTheLatest(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-12-08 17:57:47 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// This test exercises downgradedJobForPlacement directly to make it easy to cover many different scenarios
|
|
|
|
// without invoking the full machinery of scheduling and updating deployment state tracking.
|
|
|
|
//
|
|
|
|
// It scaffolds the parts of the scheduler and state store needed to mimic the updates.
|
|
|
|
updates := []struct {
|
|
|
|
// Version of the job this update represents
|
|
|
|
version uint64
|
|
|
|
|
|
|
|
// whether this update is marked as promoted: Promoted is only true if the job
|
|
|
|
// update is a "destructive" update and has been updated manually
|
|
|
|
promoted bool
|
|
|
|
|
|
|
|
// requireCanaries indicates whether the job update requires placing canaries due to
|
|
|
|
// it being a destructive update compared to the latest promoted deployment.
|
|
|
|
requireCanaries bool
|
|
|
|
|
|
|
|
// the expected version for migrating a stable non-canary alloc after applying this update
|
|
|
|
expectedVersion uint64
|
|
|
|
}{
|
|
|
|
// always use latest promoted deployment
|
|
|
|
{1, true, true, 1},
|
|
|
|
{2, true, true, 2},
|
|
|
|
{3, true, true, 3},
|
|
|
|
|
|
|
|
// ignore the most recent non-promoted deployments
|
|
|
|
{4, false, true, 3},
|
|
|
|
{5, false, true, 3},
|
|
|
|
{6, false, true, 3},
|
|
|
|
|
|
|
|
// use latest promoted after promotion
|
|
|
|
{7, true, true, 7},
|
|
|
|
|
|
|
|
// non-destructive updates that don't require canaries and are treated as promoted
|
|
|
|
{8, false, false, 8},
|
|
|
|
}
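
// In short: each placement should be downgraded to the job version of the
// most recent deployment that was either promoted or needed no canaries,
// which is what the expectedVersion column above encodes.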
|
|
|
|
|
|
|
|
job := mock.Job()
|
|
|
|
job.Version = 0
|
|
|
|
job.Stable = true
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
initDeployment := &structs.Deployment{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
JobID: job.ID,
|
|
|
|
Namespace: job.Namespace,
|
|
|
|
JobVersion: job.Version,
|
|
|
|
JobModifyIndex: job.JobModifyIndex,
|
|
|
|
JobCreateIndex: job.CreateIndex,
|
|
|
|
TaskGroups: map[string]*structs.DeploymentState{
|
|
|
|
"web": {
|
|
|
|
DesiredTotal: 1,
|
|
|
|
Promoted: true,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Status: structs.DeploymentStatusSuccessful,
|
|
|
|
StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), initDeployment))
|
|
|
|
|
|
|
|
deploymentIDs := []string{initDeployment.ID}
|
|
|
|
|
|
|
|
for i, u := range updates {
|
|
|
|
t.Run(fmt.Sprintf("%d: %#+v", i, u), func(t *testing.T) {
|
|
|
|
t.Logf("case: %#+v", u)
|
|
|
|
nj := job.Copy()
|
|
|
|
nj.Version = u.version
|
|
|
|
nj.TaskGroups[0].Tasks[0].Env["version"] = fmt.Sprintf("%v", u.version)
|
|
|
|
nj.TaskGroups[0].Count = 1
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nj))
|
|
|
|
|
|
|
|
desiredCanaries := 1
|
|
|
|
if !u.requireCanaries {
|
|
|
|
desiredCanaries = 0
|
|
|
|
}
|
|
|
|
deployment := &structs.Deployment{
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
JobID: nj.ID,
|
|
|
|
Namespace: nj.Namespace,
|
|
|
|
JobVersion: nj.Version,
|
|
|
|
JobModifyIndex: nj.JobModifyIndex,
|
|
|
|
JobCreateIndex: nj.CreateIndex,
|
|
|
|
TaskGroups: map[string]*structs.DeploymentState{
|
|
|
|
"web": {
|
|
|
|
DesiredTotal: 1,
|
|
|
|
Promoted: u.promoted,
|
|
|
|
DesiredCanaries: desiredCanaries,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
Status: structs.DeploymentStatusSuccessful,
|
|
|
|
StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), deployment))
|
|
|
|
|
|
|
|
deploymentIDs = append(deploymentIDs, deployment.ID)
|
|
|
|
|
|
|
|
sched := h.Scheduler(NewServiceScheduler).(*GenericScheduler)
|
|
|
|
|
|
|
|
sched.job = nj
|
|
|
|
sched.deployment = deployment
|
|
|
|
placement := &allocPlaceResult{
|
|
|
|
taskGroup: nj.TaskGroups[0],
|
|
|
|
}
|
|
|
|
|
|
|
|
// Here, assert the downgraded job version
|
|
|
|
foundDeploymentID, foundJob, err := sched.downgradedJobForPlacement(placement)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, u.expectedVersion, foundJob.Version)
|
|
|
|
require.Equal(t, deploymentIDs[u.expectedVersion], foundDeploymentID)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-14 21:12:53 +00:00
|
|
|
// TestServiceSched_RunningWithNextAllocation asserts that if a running allocation has
|
|
|
|
// NextAllocation set, the allocation is not ignored and will be stopped
|
|
|
|
func TestServiceSched_RunningWithNextAllocation(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2020-09-14 21:12:53 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
node1 := mock.Node()
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node1))
|
2020-09-14 21:12:53 +00:00
|
|
|
|
|
|
|
totalCount := 2
|
|
|
|
job := mock.Job()
|
|
|
|
job.Version = 0
|
|
|
|
job.Stable = true
|
|
|
|
job.TaskGroups[0].Count = totalCount
|
|
|
|
job.TaskGroups[0].Update = nil
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
2020-09-14 21:12:53 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < totalCount+1; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node1.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// simulate a case where .NextAllocation is set but the alloc is still running
|
|
|
|
allocs[2].PreviousAllocation = allocs[0].ID
|
|
|
|
allocs[0].NextAllocation = allocs[2].ID
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
2020-09-14 21:12:53 +00:00
|
|
|
|
|
|
|
// new job version with an updated task config
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.Version = 1
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job2))
|
2020-09-14 21:12:53 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2020-10-19 13:30:15 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-09-14 21:12:53 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// assert that all original allocations have been stopped
|
|
|
|
for _, alloc := range allocs {
|
|
|
|
updated, err := h.State.AllocByID(nil, alloc.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equalf(t, structs.AllocDesiredStatusStop, updated.DesiredStatus, "alloc %v", alloc.ID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// assert that the new job has proper allocations
|
|
|
|
|
|
|
|
jobAllocs, err := h.State.AllocsByJob(nil, job.Namespace, job.ID, true)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, jobAllocs, 5)
|
|
|
|
|
|
|
|
allocsByVersion := map[uint64][]string{}
|
|
|
|
for _, alloc := range jobAllocs {
|
|
|
|
allocsByVersion[alloc.Job.Version] = append(allocsByVersion[alloc.Job.Version], alloc.ID)
|
|
|
|
}
|
|
|
|
require.Len(t, allocsByVersion[1], 2)
|
|
|
|
require.Len(t, allocsByVersion[0], 3)
|
|
|
|
}
|
2021-03-18 19:35:11 +00:00
|
|
|
|
|
|
|
func TestServiceSched_CSIVolumesPerAlloc(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
2021-03-18 19:35:11 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// Create some nodes, each running the CSI plugin
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
node.CSINodePlugins = map[string]*structs.CSIInfo{
|
|
|
|
"test-plugin": {
|
|
|
|
PluginID: "test-plugin",
|
|
|
|
Healthy: true,
|
|
|
|
NodeInfo: &structs.CSINodeInfo{MaxVolumes: 2},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertNode(
|
|
|
|
structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
}
|
|
|
|
|
|
|
|
// create per-alloc volumes
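// With PerAlloc volume requests, each allocation my-job.web[N] is expected to
// claim the volume whose ID carries the matching index suffix, e.g.
// volume-unique[0] for my-job.web[0]; that is why exactly three volumes are
// created below for the three-count group.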
|
|
|
|
vol0 := structs.NewCSIVolume("volume-unique[0]", 0)
|
|
|
|
vol0.PluginID = "test-plugin"
|
|
|
|
vol0.Namespace = structs.DefaultNamespace
|
|
|
|
vol0.AccessMode = structs.CSIVolumeAccessModeSingleNodeWriter
|
|
|
|
vol0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
|
|
|
|
|
|
|
|
vol1 := vol0.Copy()
|
|
|
|
vol1.ID = "volume-unique[1]"
|
|
|
|
vol2 := vol0.Copy()
|
|
|
|
vol2.ID = "volume-unique[2]"
|
|
|
|
|
|
|
|
// create shared volume
|
|
|
|
shared := vol0.Copy()
|
|
|
|
shared.ID = "volume-shared"
|
|
|
|
// TODO: this should cause a test failure, see GH-10157
|
|
|
|
// replace this value with structs.CSIVolumeAccessModeSingleNodeWriter
|
|
|
|
// once it's been fixed
|
|
|
|
shared.AccessMode = structs.CSIVolumeAccessModeMultiNodeReader
|
|
|
|
|
2022-03-07 16:06:59 +00:00
|
|
|
require.NoError(h.State.UpsertCSIVolume(
|
2021-03-18 19:35:11 +00:00
|
|
|
h.NextIndex(), []*structs.CSIVolume{shared, vol0, vol1, vol2}))
|
|
|
|
|
|
|
|
// Create a job that uses both
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 3
|
|
|
|
job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
|
|
|
|
"shared": {
|
|
|
|
Type: "csi",
|
|
|
|
Name: "shared",
|
|
|
|
Source: "volume-shared",
|
|
|
|
ReadOnly: true,
|
|
|
|
},
|
|
|
|
"unique": {
|
|
|
|
Type: "csi",
|
|
|
|
Name: "unique",
|
|
|
|
Source: "volume-unique",
|
|
|
|
PerAlloc: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup,
|
|
|
|
h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation and expect a single plan without annotations
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(err)
|
|
|
|
require.Len(h.Plans, 1, "expected one plan")
|
|
|
|
require.Nil(h.Plans[0].Annotations, "expected no annotations")
|
|
|
|
|
|
|
|
// Expect the eval has not spawned a blocked eval
|
|
|
|
require.Equal(len(h.CreateEvals), 0)
|
|
|
|
require.Equal("", h.Evals[0].BlockedEval, "did not expect a blocked eval")
|
|
|
|
require.Equal(structs.EvalStatusComplete, h.Evals[0].Status)
|
|
|
|
|
|
|
|
// Ensure the plan allocated and we got expected placements
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range h.Plans[0].NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Len(planned, 3, "expected 3 planned allocations")
|
|
|
|
|
|
|
|
out, err := h.State.AllocsByJob(nil, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
require.Len(out, 3, "expected 3 placed allocations")
|
|
|
|
|
|
|
|
// Allocations don't have references to the actual volumes assigned, but
|
|
|
|
// because we set a max of 2 volumes per Node plugin, we can verify that
|
|
|
|
// they've been properly scheduled by making sure they're all on separate
|
|
|
|
// clients.
|
|
|
|
seen := map[string]struct{}{}
|
|
|
|
for _, alloc := range out {
|
|
|
|
_, ok := seen[alloc.NodeID]
|
|
|
|
require.False(ok, "allocations should be scheduled to separate nodes")
|
|
|
|
seen[alloc.NodeID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the job to 5 instances
|
|
|
|
job.TaskGroups[0].Count = 5
|
|
|
|
require.NoError(h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
// Create a new eval and process it. It should not create a new plan.
|
|
|
|
eval.ID = uuid.Generate()
|
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup,
|
|
|
|
h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
err = h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(err)
|
|
|
|
require.Len(h.Plans, 1, "expected one plan")
|
|
|
|
|
|
|
|
// Expect the placements to have failed, spawning a blocked eval
|
|
|
|
require.NotEqual("", h.Evals[1].BlockedEval,
|
|
|
|
"expected a blocked eval to be spawned")
|
|
|
|
require.Equal(2, h.Evals[1].QueuedAllocations["web"], "expected 2 queued allocs")
|
2022-06-07 17:31:10 +00:00
|
|
|
require.Equal(5, h.Evals[1].FailedTGAllocs["web"].
|
2021-03-18 19:35:11 +00:00
|
|
|
ConstraintFiltered["missing CSI Volume volume-unique[3]"])
|
|
|
|
|
|
|
|
// Upsert 2 more per-alloc volumes
|
|
|
|
vol4 := vol0.Copy()
|
|
|
|
vol4.ID = "volume-unique[3]"
|
|
|
|
vol5 := vol0.Copy()
|
|
|
|
vol5.ID = "volume-unique[4]"
|
2022-03-07 16:06:59 +00:00
|
|
|
require.NoError(h.State.UpsertCSIVolume(
|
2021-03-18 19:35:11 +00:00
|
|
|
h.NextIndex(), []*structs.CSIVolume{vol4, vol5}))
|
|
|
|
|
|
|
|
// Process again with failure fixed. It should create a new plan
|
|
|
|
eval.ID = uuid.Generate()
|
|
|
|
require.NoError(h.State.UpsertEvals(structs.MsgTypeTestSetup,
|
|
|
|
h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
err = h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(err)
|
|
|
|
require.Len(h.Plans, 2, "expected two plans")
|
|
|
|
require.Nil(h.Plans[1].Annotations, "expected no annotations")
|
|
|
|
|
|
|
|
require.Equal("", h.Evals[2].BlockedEval, "did not expect a blocked eval")
|
|
|
|
require.Len(h.Evals[2].FailedTGAllocs, 0)
|
|
|
|
|
|
|
|
// Ensure the plan allocated and we got expected placements
|
|
|
|
planned = []*structs.Allocation{}
|
|
|
|
for _, allocList := range h.Plans[1].NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Len(planned, 2, "expected 2 new planned allocations")
|
|
|
|
|
|
|
|
out, err = h.State.AllocsByJob(nil, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
require.Len(out, 5, "expected 5 placed allocations total")
|
|
|
|
|
|
|
|
// Make sure they're still all on separate clients
|
|
|
|
seen = map[string]struct{}{}
|
|
|
|
for _, alloc := range out {
|
|
|
|
_, ok := seen[alloc.NodeID]
|
|
|
|
require.False(ok, "allocations should be scheduled to separate nodes")
|
|
|
|
seen[alloc.NodeID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2020-12-17 23:21:46 +00:00
|
|
|
|
2022-06-07 17:31:10 +00:00
|
|
|
func TestServiceSched_CSITopology(t *testing.T) {
|
|
|
|
ci.Parallel(t)
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
zones := []string{"zone-0", "zone-1", "zone-2", "zone-3"}
|
|
|
|
|
|
|
|
// Create some nodes, each running a CSI plugin with topology for
|
|
|
|
// a different "zone"
|
|
|
|
for i := 0; i < 12; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
node.Datacenter = zones[i%4]
|
|
|
|
node.CSINodePlugins = map[string]*structs.CSIInfo{
|
|
|
|
"test-plugin-" + zones[i%4]: {
|
|
|
|
PluginID: "test-plugin-" + zones[i%4],
|
|
|
|
Healthy: true,
|
|
|
|
NodeInfo: &structs.CSINodeInfo{
|
|
|
|
MaxVolumes: 3,
|
|
|
|
AccessibleTopology: &structs.CSITopology{
|
|
|
|
Segments: map[string]string{"zone": zones[i%4]}},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertNode(
|
|
|
|
structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
}
|
|
|
|
|
|
|
|
// create 2 per-alloc volumes for those zones
|
|
|
|
vol0 := structs.NewCSIVolume("myvolume[0]", 0)
|
|
|
|
vol0.PluginID = "test-plugin-zone-0"
|
|
|
|
vol0.Namespace = structs.DefaultNamespace
|
|
|
|
vol0.AccessMode = structs.CSIVolumeAccessModeSingleNodeWriter
|
|
|
|
vol0.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
|
|
|
|
vol0.RequestedTopologies = &structs.CSITopologyRequest{
|
|
|
|
Required: []*structs.CSITopology{{
|
|
|
|
Segments: map[string]string{"zone": "zone-0"},
|
|
|
|
}},
|
|
|
|
}
|
|
|
|
|
|
|
|
vol1 := vol0.Copy()
|
|
|
|
vol1.ID = "myvolume[1]"
|
|
|
|
vol1.PluginID = "test-plugin-zone-1"
|
|
|
|
vol1.RequestedTopologies.Required[0].Segments["zone"] = "zone-1"
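
// Because the volumes are per-alloc and each requires a different zone
// topology segment, alloc my-job.web[0] can only land on a zone-0 node and
// my-job.web[1] on a zone-1 node; the job below spans all four zones so both
// placements remain feasible.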
|
|
|
|
|
|
|
|
require.NoError(t, h.State.UpsertCSIVolume(
|
|
|
|
h.NextIndex(), []*structs.CSIVolume{vol0, vol1}))
|
|
|
|
|
|
|
|
// Create a job that uses those volumes
|
|
|
|
job := mock.Job()
|
|
|
|
job.Datacenters = zones
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
|
|
|
|
"myvolume": {
|
|
|
|
Type: "csi",
|
|
|
|
Name: "unique",
|
|
|
|
Source: "myvolume",
|
|
|
|
PerAlloc: true,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup,
|
|
|
|
h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation and expect a single plan without annotations
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Len(t, h.Plans, 1, "expected one plan")
|
|
|
|
require.Nil(t, h.Plans[0].Annotations, "expected no annotations")
|
|
|
|
|
|
|
|
// Expect the eval has not spawned a blocked eval
|
|
|
|
require.Equal(t, len(h.CreateEvals), 0)
|
|
|
|
require.Equal(t, "", h.Evals[0].BlockedEval, "did not expect a blocked eval")
|
|
|
|
require.Equal(t, structs.EvalStatusComplete, h.Evals[0].Status)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-12-17 23:21:46 +00:00
|
|
|
// TestPropagateTaskState asserts that propagateTaskState only copies state
|
|
|
|
// when the previous allocation is lost or draining.
|
|
|
|
func TestPropagateTaskState(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-12-17 23:21:46 +00:00
|
|
|
|
|
|
|
const taskName = "web"
|
|
|
|
taskHandle := &structs.TaskHandle{
|
|
|
|
Version: 1,
|
|
|
|
DriverState: []byte("driver-state"),
|
|
|
|
}
|
|
|
|
|
|
|
|
cases := []struct {
|
|
|
|
name string
|
|
|
|
prevAlloc *structs.Allocation
|
|
|
|
prevLost bool
|
|
|
|
copied bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
name: "LostWithState",
|
|
|
|
prevAlloc: &structs.Allocation{
|
|
|
|
ClientStatus: structs.AllocClientStatusRunning,
|
|
|
|
DesiredTransition: structs.DesiredTransition{},
|
|
|
|
TaskStates: map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
taskName: {
|
2020-12-17 23:21:46 +00:00
|
|
|
TaskHandle: taskHandle,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
prevLost: true,
|
|
|
|
copied: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "DrainedWithState",
|
|
|
|
prevAlloc: &structs.Allocation{
|
|
|
|
ClientStatus: structs.AllocClientStatusRunning,
|
|
|
|
DesiredTransition: structs.DesiredTransition{
|
2022-08-17 16:26:34 +00:00
|
|
|
Migrate: pointer.Of(true),
|
2020-12-17 23:21:46 +00:00
|
|
|
},
|
|
|
|
TaskStates: map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
taskName: {
|
2020-12-17 23:21:46 +00:00
|
|
|
TaskHandle: taskHandle,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
prevLost: false,
|
|
|
|
copied: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "LostWithoutState",
|
|
|
|
prevAlloc: &structs.Allocation{
|
|
|
|
ClientStatus: structs.AllocClientStatusRunning,
|
|
|
|
DesiredTransition: structs.DesiredTransition{},
|
|
|
|
TaskStates: map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
taskName: {},
|
2020-12-17 23:21:46 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
prevLost: true,
|
|
|
|
copied: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "DrainedWithoutState",
|
|
|
|
prevAlloc: &structs.Allocation{
|
|
|
|
ClientStatus: structs.AllocClientStatusRunning,
|
|
|
|
DesiredTransition: structs.DesiredTransition{
|
2022-08-17 16:26:34 +00:00
|
|
|
Migrate: pointer.Of(true),
|
2020-12-17 23:21:46 +00:00
|
|
|
},
|
|
|
|
TaskStates: map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
taskName: {},
|
2020-12-17 23:21:46 +00:00
|
|
|
},
|
|
|
|
},
|
|
|
|
prevLost: false,
|
|
|
|
copied: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "TerminalWithState",
|
|
|
|
prevAlloc: &structs.Allocation{
|
|
|
|
ClientStatus: structs.AllocClientStatusComplete,
|
|
|
|
DesiredTransition: structs.DesiredTransition{},
|
|
|
|
TaskStates: map[string]*structs.TaskState{
|
2021-10-01 13:59:55 +00:00
|
|
|
taskName: {
|
2020-12-17 23:21:46 +00:00
|
|
|
TaskHandle: taskHandle,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
prevLost: false,
|
|
|
|
copied: false,
|
|
|
|
},
|
|
|
|
}
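
// The cases above encode the copy rule under test: task state is carried
// forward only when the task actually has a TaskHandle to copy and the
// previous allocation was either lost or drained (DesiredTransition.Migrate),
// never for allocations that stopped for any other reason.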
|
|
|
|
|
|
|
|
for i := range cases {
|
|
|
|
tc := cases[i]
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
newAlloc := &structs.Allocation{
|
|
|
|
// Required by propagateTaskState and populated
|
|
|
|
// by the scheduler's node iterator.
|
|
|
|
AllocatedResources: &structs.AllocatedResources{
|
|
|
|
Tasks: map[string]*structs.AllocatedTaskResources{
|
|
|
|
taskName: nil, // value isn't used
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
propagateTaskState(newAlloc, tc.prevAlloc, tc.prevLost)
|
|
|
|
|
|
|
|
if tc.copied {
|
|
|
|
// Assert state was copied
|
|
|
|
require.NotNil(t, newAlloc.TaskStates)
|
|
|
|
require.Contains(t, newAlloc.TaskStates, taskName)
|
|
|
|
require.Equal(t, taskHandle, newAlloc.TaskStates[taskName].TaskHandle)
|
|
|
|
} else {
|
|
|
|
// Assert state was *not* copied
|
|
|
|
require.Empty(t, newAlloc.TaskStates,
|
|
|
|
"expected task states not to be copied")
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2022-02-16 18:50:20 +00:00
|
|
|
|
|
|
|
// Tests that a client disconnect generates allocation status updates and follow-up evals.
|
|
|
|
func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
count := 1
|
|
|
|
maxClientDisconnect := 10 * time.Minute
|
|
|
|
|
|
|
|
disconnectedNode, job, unknownAllocs := initNodeAndAllocs(t, h, count, maxClientDisconnect,
|
|
|
|
structs.NodeStatusReady, structs.AllocClientStatusRunning)
|
|
|
|
|
|
|
|
// Now disconnect the node
|
|
|
|
disconnectedNode.Status = structs.NodeStatusDisconnected
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), disconnectedNode))
|
|
|
|
|
|
|
|
// Create an evaluation triggered by the disconnect
|
|
|
|
evals := []*structs.Evaluation{{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: disconnectedNode.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}}
|
|
|
|
nodeStatusUpdateEval := evals[0]
|
|
|
|
require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), evals))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, nodeStatusUpdateEval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, structs.EvalStatusComplete, h.Evals[0].Status)
|
|
|
|
require.Len(t, h.Plans, 1, "plan")
|
|
|
|
|
|
|
|
// One followup delayed eval created
|
|
|
|
require.Len(t, h.CreateEvals, 1)
|
|
|
|
followUpEval := h.CreateEvals[0]
|
|
|
|
require.Equal(t, nodeStatusUpdateEval.ID, followUpEval.PreviousEval)
|
|
|
|
require.Equal(t, "pending", followUpEval.Status)
|
|
|
|
require.NotEmpty(t, followUpEval.WaitUntil)
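
// WaitUntil delays the follow-up eval until the disconnect window has passed
// (presumably around now plus max_client_disconnect), so the unknown allocs
// are only rescheduled if the client fails to reconnect in time.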
|
|
|
|
|
|
|
|
// Wait for the follow-up eval to appear in the state store
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
found, err := h.State.EvalByID(nil, followUpEval.ID)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if found == nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Equal(t, nodeStatusUpdateEval.ID, found.PreviousEval)
|
|
|
|
require.Equal(t, "pending", found.Status)
|
|
|
|
require.NotEmpty(t, found.WaitUntil)
|
|
|
|
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
// Validate that the ClientStatus updates are part of the plan.
|
|
|
|
require.Len(t, h.Plans[0].NodeAllocation[disconnectedNode.ID], count)
|
|
|
|
// Pending update should have unknown status.
|
|
|
|
for _, nodeAlloc := range h.Plans[0].NodeAllocation[disconnectedNode.ID] {
|
|
|
|
require.Equal(t, nodeAlloc.ClientStatus, structs.AllocClientStatusUnknown)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Simulate that NodeAllocation got processed.
|
|
|
|
err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), h.Plans[0].NodeAllocation[disconnectedNode.ID])
|
|
|
|
require.NoError(t, err, "plan.NodeUpdate")
|
|
|
|
|
|
|
|
// Validate that the StateStore Upsert applied the ClientStatus we specified.
|
|
|
|
for _, alloc := range unknownAllocs {
|
|
|
|
alloc, err = h.State.AllocByID(nil, alloc.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, alloc.ClientStatus, structs.AllocClientStatusUnknown)
|
|
|
|
|
|
|
|
// Allocations have been transitioned to unknown
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, alloc.DesiredStatus)
|
|
|
|
require.Equal(t, structs.AllocClientStatusUnknown, alloc.ClientStatus)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func initNodeAndAllocs(t *testing.T, h *Harness, allocCount int,
|
|
|
|
maxClientDisconnect time.Duration, nodeStatus, clientStatus string) (*structs.Node, *structs.Job, []*structs.Allocation) {
|
|
|
|
// Node with the given status
|
|
|
|
node := mock.Node()
|
|
|
|
node.Status = nodeStatus
|
|
|
|
require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node))
|
|
|
|
|
|
|
|
// Job with allocations and max_client_disconnect
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = allocCount
|
|
|
|
job.TaskGroups[0].MaxClientDisconnect = &maxClientDisconnect
|
|
|
|
require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), job))
|
|
|
|
|
|
|
|
allocs := make([]*structs.Allocation, allocCount)
|
|
|
|
for i := 0; i < allocCount; i++ {
|
|
|
|
// Alloc for the running group
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusRun
|
|
|
|
alloc.ClientStatus = clientStatus
|
|
|
|
|
|
|
|
allocs[i] = alloc
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs))
|
|
|
|
return node, job, allocs
|
|
|
|
}
|