package scheduler

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

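// The tests below drive the service scheduler through the test Harness:
// nodes, jobs, and evaluations are seeded via h.State, h.Process runs the
// scheduler on an evaluation, and the resulting plans (h.Plans), spawned
// evaluations (h.CreateEvals), and evaluation updates (h.Evals) are captured
// on the harness for assertions.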
func TestServiceSched_JobRegister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has no spawned blocked eval
	if len(h.CreateEvals) != 0 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}
	if h.Evals[0].BlockedEval != "" {
		t.Fatalf("bad: %#v", h.Evals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure different ports were used.
	used := make(map[int]map[string]struct{})
	for _, alloc := range out {
		for _, resource := range alloc.TaskResources {
			for _, port := range resource.Networks[0].DynamicPorts {
				nodeMap, ok := used[port.Value]
				if !ok {
					nodeMap = make(map[string]struct{})
					used[port.Value] = nodeMap
				}
				if _, ok := nodeMap[alloc.NodeID]; ok {
					t.Fatalf("Port collision on node %q %v", alloc.NodeID, port.Value)
				}
				nodeMap[alloc.NodeID] = struct{}{}
			}
		}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	job.TaskGroups[0].EphemeralDisk.Sticky = true
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the plan allocated
	plan := h.Plans[0]
	planned := make(map[string]*structs.Allocation)
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			planned[alloc.ID] = alloc
		}
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Update the job to force a rolling upgrade
	updated := job.Copy()
	updated.TaskGroups[0].Tasks[0].Resources.CPU += 10
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), updated))

	// Create a mock evaluation to handle the update
	eval = &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	h1 := NewHarnessWithState(t, h.State)
	if err := h1.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan was created for the update
	if len(h1.Plans) != 1 {
		t.Fatalf("bad: %#v", h1.Plans)
	}
	plan = h1.Plans[0]
	var newPlanned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		newPlanned = append(newPlanned, allocList...)
	}
	if len(newPlanned) != 10 {
		t.Fatalf("bad plan: %#v", plan)
	}
	// Ensure that the new allocations were placed on the same node as the older
	// ones
	for _, new := range newPlanned {
		if new.PreviousAllocation == "" {
			t.Fatalf("new alloc %q doesn't have a previous allocation", new.ID)
		}

		old, ok := planned[new.PreviousAllocation]
		if !ok {
			t.Fatalf("new alloc %q previous allocation doesn't match any prior placed alloc (%q)", new.ID, new.PreviousAllocation)
		}
		if new.NodeID != old.NodeID {
			t.Fatalf("new alloc and old alloc node doesn't match; got %q; want %q", new.NodeID, old.NodeID)
		}
	}
}

func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job with count 2 and an ephemeral disk ask large enough that
	// only one allocation can fit on the node
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has a blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if h.CreateEvals[0].TriggeredBy != structs.EvalTriggerQueuedAllocs {
		t.Fatalf("bad: %#v", h.CreateEvals[0])
	}

	// Ensure the plan allocated only one allocation
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure only one allocation was placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job that uses distinct host and has count 1 higher than what is
	// possible.
	job := mock.Job()
	job.TaskGroups[0].Count = 11
	job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts})
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the eval has spawned blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	// Ensure the plan failed to alloc
	outEval := h.Evals[0]
	if len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %+v", outEval)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure a different node was used for each allocation.
	used := make(map[string]struct{})
	for _, alloc := range out {
		if _, ok := used[alloc.NodeID]; ok {
			t.Fatalf("Node collision %v", alloc.NodeID)
		}
		used[alloc.NodeID] = struct{}{}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

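// TestServiceSched_JobRegister_DistinctProperty: with ten nodes split across
// two rack values and a distinct_property limit of 2 per rack, only 4 of the
// 8 requested allocations can be placed; the remainder is expected to fail
// and spawn a blocked eval.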
func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		rack := "rack2"
		if i < 5 {
			rack = "rack1"
		}
		node.Meta["rack"] = rack
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job that uses distinct property and has count higher than what is
	// possible.
	job := mock.Job()
	job.TaskGroups[0].Count = 8
	job.Constraints = append(job.Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
			RTarget: "2",
		})
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has spawned blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	// Ensure the plan failed to alloc
	outEval := h.Evals[0]
	if len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %+v", outEval)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 4 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 4 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure each node was only used twice
	used := make(map[string]uint64)
	for _, alloc := range out {
		if count := used[alloc.NodeID]; count > 2 {
			t.Fatalf("Node %v used too much: %d", alloc.NodeID, count)
		}
		used[alloc.NodeID]++
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 2; i++ {
		node := mock.Node()
		node.Meta["ssd"] = "true"
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job that uses distinct property only on one task group.
	job := mock.Job()
	job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy())
	job.TaskGroups[0].Count = 1
	job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${meta.ssd}",
		})

	job.TaskGroups[1].Name = "tg2"
	job.TaskGroups[1].Count = 2
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval hasn't spawned blocked eval
	if len(h.CreateEvals) != 0 {
		t.Fatalf("bad: %#v", h.CreateEvals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 3 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 3 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) {
	h := NewHarness(t)
	assert := assert.New(t)

	// Create a job that uses distinct property over the node-id
	job := mock.Job()
	job.TaskGroups[0].Count = 3
	job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints,
		&structs.Constraint{
			Operand: structs.ConstraintDistinctProperty,
			LTarget: "${node.unique.id}",
		})
	assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob")

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 6; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode")
	}

	// Create some allocations
	var allocs []*structs.Allocation
	for i := 0; i < 3; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	assert.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs), "UpsertAllocs")

	// Update the count
	job2 := job.Copy()
	job2.TaskGroups[0].Count = 6
	assert.Nil(h.State.UpsertJob(h.NextIndex(), job2), "UpsertJob")

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

	// Ensure a single plan
	assert.Len(h.Plans, 1, "Number of plans")
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	assert.Nil(plan.Annotations, "Plan.Annotations")

	// Ensure the eval hasn't spawned blocked eval
	assert.Len(h.CreateEvals, 0, "Created Evals")

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	assert.Len(planned, 6, "Planned Allocations")

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	assert.Nil(err, "AllocsByJob")

	// Ensure all allocations placed
	assert.Len(out, 6, "Placed Allocations")

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// Test job registration with spread configured
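// The loop below runs ten subtests, stepping the desired dc1 spread target
// from 100% down in 10% increments (with the remainder targeted at dc2) and
// checking that the per-datacenter allocation counts track the configured
// percentages.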
func TestServiceSched_Spread(t *testing.T) {
	assert := assert.New(t)

	start := uint8(100)
	step := uint8(10)

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("%d%% in dc1", start)
		t.Run(name, func(t *testing.T) {
			h := NewHarness(t)
			remaining := uint8(100 - start)
			// Create a job that uses spread over data center
			job := mock.Job()
			job.Datacenters = []string{"dc1", "dc2"}
			job.TaskGroups[0].Count = 10
			job.TaskGroups[0].Spreads = append(job.TaskGroups[0].Spreads,
				&structs.Spread{
					Attribute: "${node.datacenter}",
					Weight:    100,
					SpreadTarget: []*structs.SpreadTarget{
						{
							Value:   "dc1",
							Percent: start,
						},
						{
							Value:   "dc2",
							Percent: remaining,
						},
					},
				})
			assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob")
			// Create some nodes, half in dc2
			var nodes []*structs.Node
			nodeMap := make(map[string]*structs.Node)
			for i := 0; i < 10; i++ {
				node := mock.Node()
				if i%2 == 0 {
					node.Datacenter = "dc2"
				}
				nodes = append(nodes, node)
				assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode")
				nodeMap[node.ID] = node
			}

			// Create a mock evaluation to register the job
			eval := &structs.Evaluation{
				Namespace:   structs.DefaultNamespace,
				ID:          uuid.Generate(),
				Priority:    job.Priority,
				TriggeredBy: structs.EvalTriggerJobRegister,
				JobID:       job.ID,
				Status:      structs.EvalStatusPending,
			}
			require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

			// Process the evaluation
			assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

			// Ensure a single plan
			assert.Len(h.Plans, 1, "Number of plans")
			plan := h.Plans[0]

			// Ensure the plan doesn't have annotations.
			assert.Nil(plan.Annotations, "Plan.Annotations")

			// Ensure the eval hasn't spawned blocked eval
			assert.Len(h.CreateEvals, 0, "Created Evals")

			// Ensure the plan allocated
			var planned []*structs.Allocation
			dcAllocsMap := make(map[string]int)
			for nodeId, allocList := range plan.NodeAllocation {
				planned = append(planned, allocList...)
				dc := nodeMap[nodeId].Datacenter
				c := dcAllocsMap[dc]
				c += len(allocList)
				dcAllocsMap[dc] = c
			}
			assert.Len(planned, 10, "Planned Allocations")

			expectedCounts := make(map[string]int)
			expectedCounts["dc1"] = 10 - i
			if i > 0 {
				expectedCounts["dc2"] = i
			}
			require.Equal(t, expectedCounts, dcAllocsMap)

			h.AssertEvalStatus(t, structs.EvalStatusComplete)
		})
		start = start - step
	}
}

// Test job registration with even spread across dc
func TestServiceSched_EvenSpread(t *testing.T) {
	assert := assert.New(t)

	h := NewHarness(t)
	// Create a job that uses even spread over data center
	job := mock.Job()
	job.Datacenters = []string{"dc1", "dc2"}
	job.TaskGroups[0].Count = 10
	job.TaskGroups[0].Spreads = append(job.TaskGroups[0].Spreads,
		&structs.Spread{
			Attribute: "${node.datacenter}",
			Weight:    100,
		})
	assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob")
	// Create some nodes, half in dc2
	var nodes []*structs.Node
	nodeMap := make(map[string]*structs.Node)
	for i := 0; i < 10; i++ {
		node := mock.Node()
		if i%2 == 0 {
			node.Datacenter = "dc2"
		}
		nodes = append(nodes, node)
		assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode")
		nodeMap[node.ID] = node
	}

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	assert.Nil(h.Process(NewServiceScheduler, eval), "Process")

	// Ensure a single plan
	assert.Len(h.Plans, 1, "Number of plans")
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	assert.Nil(plan.Annotations, "Plan.Annotations")

	// Ensure the eval hasn't spawned blocked eval
	assert.Len(h.CreateEvals, 0, "Created Evals")

	// Ensure the plan allocated
	var planned []*structs.Allocation
	dcAllocsMap := make(map[string]int)
	for nodeId, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
		dc := nodeMap[nodeId].Datacenter
		c := dcAllocsMap[dc]
		c += len(allocList)
		dcAllocsMap[dc] = c
	}
	assert.Len(planned, 10, "Planned Allocations")

	// Expect even split allocs across datacenter
	expectedCounts := make(map[string]int)
	expectedCounts["dc1"] = 5
	expectedCounts["dc2"] = 5

	require.Equal(t, expectedCounts, dcAllocsMap)

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_Annotate(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:    structs.DefaultNamespace,
		ID:           uuid.Generate(),
		Priority:     job.Priority,
		TriggeredBy:  structs.EvalTriggerJobRegister,
		JobID:        job.ID,
		AnnotatePlan: true,
		Status:       structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure the plan had annotations.
	if plan.Annotations == nil {
		t.Fatalf("expected annotations")
	}

	desiredTGs := plan.Annotations.DesiredTGUpdates
	if l := len(desiredTGs); l != 1 {
		t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1)
	}

	desiredChanges, ok := desiredTGs["web"]
	if !ok {
		t.Fatalf("expected task group web to have desired changes")
	}

	expected := &structs.DesiredUpdates{Place: 10}
	if !reflect.DeepEqual(desiredChanges, expected) {
		t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
	}
}

func TestServiceSched_JobRegister_CountZero(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job and set the task group count to zero.
	job := mock.Job()
	job.TaskGroups[0].Count = 0
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure no allocations placed
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

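// TestServiceSched_JobRegister_AllocFail: with no nodes registered, none of
// the ten placements can succeed. The scheduler should report a single failed
// task group with nine coalesced failures, queue all ten allocations, and
// spawn a blocked eval linked back from the updated eval's BlockedEval field.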
func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
	h := NewHarness(t)

	// Create NO nodes
	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure there is a follow up eval.
	if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]

	// Ensure the eval has its spawned blocked eval
	if outEval.BlockedEval != h.CreateEvals[0].ID {
		t.Fatalf("bad: %#v", outEval)
	}

	// Ensure the plan failed to alloc
	if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %#v", outEval)
	}

	metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name]
	if !ok {
		t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
	}

	// Check the coalesced failures
	if metrics.CoalescedFailures != 9 {
		t.Fatalf("bad: %#v", metrics)
	}

	// Check the available nodes
	if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 0 {
		t.Fatalf("bad: %#v", metrics)
	}

	// Check queued allocations
	queued := outEval.QueuedAllocations["web"]
	if queued != 10 {
		t.Fatalf("expected queued: %v, actual: %v", 10, queued)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

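// TestServiceSched_JobRegister_CreateBlockedEval: one node is fully reserved
// and a second node is made ineligible via its kernel attribute, so nothing
// can place. The spawned blocked eval should record class eligibility for the
// full node's computed class but not the ineligible node's, and should not be
// marked as having escaped computed-class tracking.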
func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) {
	h := NewHarness(t)

	// Create a full node
	node := mock.Node()
	node.ReservedResources = &structs.NodeReservedResources{
		Cpu: structs.NodeReservedCpuResources{
			CpuShares: node.NodeResources.Cpu.CpuShares,
		},
	}
	node.ComputeClass()
	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create an ineligible node
	node2 := mock.Node()
	node2.Attributes["kernel.name"] = "windows"
	node2.ComputeClass()
	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure the plan has created a follow up eval.
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	created := h.CreateEvals[0]
	if created.Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", created)
	}

	classes := created.ClassEligibility
	if len(classes) != 2 || !classes[node.ComputedClass] || classes[node2.ComputedClass] {
		t.Fatalf("bad: %#v", classes)
	}

	if created.EscapedComputedClass {
		t.Fatalf("bad: %#v", created)
	}

	// Ensure there is a follow up eval.
	if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]

	// Ensure the plan failed to alloc
	if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %#v", outEval)
	}

	metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name]
	if !ok {
		t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
	}

	// Check the coalesced failures
	if metrics.CoalescedFailures != 9 {
		t.Fatalf("bad: %#v", metrics)
	}

	// Check the available nodes
	if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 2 {
		t.Fatalf("bad: %#v", metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
	h := NewHarness(t)

	// Create one node
	node := mock.Node()
	node.NodeClass = "class_0"
	require.NoError(t, node.ComputeClass())
	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job that constrains on a node class
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	job.TaskGroups[0].Constraints = append(job.Constraints,
		&structs.Constraint{
			LTarget: "${node.class}",
			RTarget: "class_0",
			Operand: "=",
		},
	)
	tg2 := job.TaskGroups[0].Copy()
	tg2.Name = "web2"
	tg2.Constraints[1].RTarget = "class_1"
	job.TaskGroups = append(job.TaskGroups, tg2)
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 2 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure two allocations placed
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)
	if len(out) != 2 {
		t.Fatalf("bad: %#v", out)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]

	// Ensure the eval has its spawned blocked eval
	if outEval.BlockedEval != h.CreateEvals[0].ID {
		t.Fatalf("bad: %#v", outEval)
	}

	// Ensure the plan failed to alloc one tg
	if outEval == nil || len(outEval.FailedTGAllocs) != 1 {
		t.Fatalf("bad: %#v", outEval)
	}

	metrics, ok := outEval.FailedTGAllocs[tg2.Name]
	if !ok {
		t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs)
	}

	// Check the coalesced failures
	if metrics.CoalescedFailures != tg2.Count-1 {
		t.Fatalf("bad: %#v", metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test just ensures the scheduler handles the eval type to avoid
// regressions.
func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) {
	h := NewHarness(t)

	// Create a job and set the task group count to zero.
	job := mock.Job()
	job.TaskGroups[0].Count = 0
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerMaxPlans,
		JobID:       job.ID,
	}

	// Insert it into the state store
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_Plan_Partial_Progress(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job with a high resource ask so that all the allocations can't
	// be placed on a single node.
	job := mock.Job()
	job.TaskGroups[0].Count = 3
	job.TaskGroups[0].Tasks[0].Resources.CPU = 3600
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure only one allocation was placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, queued)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_EvaluateBlockedEval(t *testing.T) {
	h := NewHarness(t)

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Insert it into the state store
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure that the eval was reblocked
	if len(h.ReblockEvals) != 1 {
		t.Fatalf("bad: %#v", h.ReblockEvals)
	}
	if h.ReblockEvals[0].ID != eval.ID {
		t.Fatalf("expect same eval to be reblocked; got %q; want %q", h.ReblockEvals[0].ID, eval.ID)
	}

	// Ensure the eval status was not updated
	if len(h.Evals) != 0 {
		t.Fatalf("Existing eval should not have status set")
	}
}

func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Insert it into the state store
	require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has no spawned blocked eval
	if len(h.Evals) != 1 {
		t.Fatalf("bad: %#v", h.Evals)
	}
	if h.Evals[0].BlockedEval != "" {
		t.Fatalf("bad: %#v", h.Evals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	require.NoError(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure the eval was not reblocked
	if len(h.ReblockEvals) != 0 {
		t.Fatalf("Existing eval should not have been reblocked as it placed all allocations")
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure queued allocations is zero
	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 0 {
		t.Fatalf("expected queued: %v, actual: %v", 0, queued)
	}
}

func TestServiceSched_JobModify(t *testing.T) {
|
2015-08-14 05:14:37 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2015-09-07 19:27:12 +00:00
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2015-08-14 05:14:37 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
2015-09-18 04:25:55 +00:00
|
|
|
// Add a few terminal status allocations, these should be ignored
|
|
|
|
var terminal []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2016-07-13 19:20:46 +00:00
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
2015-09-18 04:25:55 +00:00
|
|
|
terminal = append(terminal, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
|
2015-09-18 04:25:55 +00:00
|
|
|
|
2015-08-14 05:14:37 +00:00
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-14 05:14:37 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2015-08-26 00:06:06 +00:00
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2015-08-26 00:06:06 +00:00
|
|
|
if len(update) != len(allocs) {
|
2015-08-14 05:14:37 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-14 05:14:37 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2015-08-14 05:14:37 +00:00
|
|
|
if len(out) != 10 {
|
2015-08-26 00:06:06 +00:00
|
|
|
t.Fatalf("bad: %#v", out)
|
2015-08-14 05:14:37 +00:00
|
|
|
}
|
2015-08-15 21:47:13 +00:00
|
|
|
|
2016-03-17 18:02:59 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
// Have a single node and submit a job. Increment the count such that all fit
|
|
|
|
// on the node but the node doesn't have enough resources to fit the new count +
|
|
|
|
// 1. This tests that we properly discount the resources of existing allocs.
|
|
|
|
func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create one node
|
|
|
|
node := mock.Node()
|
2018-10-04 21:33:09 +00:00
|
|
|
node.NodeResources.Cpu.CpuShares = 1000
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Generate a fake job with one allocation
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Tasks[0].Resources.CPU = 256
|
|
|
|
job2 := job.Copy()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2018-10-03 16:47:18 +00:00
|
|
|
alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256
|
2016-03-21 21:17:37 +00:00
|
|
|
allocs = append(allocs, alloc)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Update the job to count 3
|
|
|
|
job2.TaskGroups[0].Count = 3
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-03-21 21:17:37 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-03-21 21:17:37 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan didn't evict the alloc
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2016-03-22 00:23:04 +00:00
|
|
|
if len(planned) != 3 {
|
2016-03-21 21:17:37 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
2016-05-19 01:11:40 +00:00
|
|
|
// Ensure the plan had no failures
|
|
|
|
if len(h.Evals) != 1 {
|
|
|
|
t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
|
|
|
|
}
|
|
|
|
outEval := h.Evals[0]
|
|
|
|
if outEval == nil || len(outEval.FailedTGAllocs) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", outEval)
|
2016-03-21 21:17:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-03-21 21:17:37 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2016-03-22 00:23:04 +00:00
|
|
|
if len(out) != 3 {
|
2016-03-21 21:17:37 +00:00
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-03-17 18:02:59 +00:00
|
|
|
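// TestServiceSched_JobModify_CountZero verifies that updating a job's task
// group count to zero evicts all existing allocations without placing new ones.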
func TestServiceSched_JobModify_CountZero(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-03-17 18:02:59 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2017-05-31 18:34:46 +00:00
|
|
|
alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i))
|
2016-03-17 18:02:59 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Add a few terminal status allocations, these should be ignored
|
|
|
|
var terminal []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
2017-05-31 18:34:46 +00:00
|
|
|
alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i))
|
2016-07-13 19:20:46 +00:00
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
2016-03-17 18:02:59 +00:00
|
|
|
terminal = append(terminal, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Update the job to be count zero
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
|
|
|
job2.TaskGroups[0].Count = 0
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-03-17 18:02:59 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-03-17 18:02:59 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != len(allocs) {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't allocate anything new
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-03-17 18:02:59 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2016-03-17 18:02:59 +00:00
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
2015-08-15 21:47:13 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-08-14 01:51:08 +00:00
|
|
|
}
|
|
|
|
|
2015-09-07 22:17:39 +00:00
|
|
|
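// TestServiceSched_JobModify_Rolling verifies that a destructive update with an
// update strategy only evicts and places MaxParallel allocations at a time and
// creates a deployment that is attached to the eval.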
func TestServiceSched_JobModify_Rolling(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-06-06 21:08:46 +00:00
|
|
|
desiredUpdates := 4
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-09-07 22:17:39 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-09-07 22:17:39 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted only MaxParallel
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
2017-06-06 21:08:46 +00:00
|
|
|
if len(update) != desiredUpdates {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), desiredUpdates, plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2017-06-06 21:08:46 +00:00
|
|
|
if len(planned) != desiredUpdates {
|
2015-09-07 22:17:39 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
2017-07-06 00:13:45 +00:00
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
2017-06-06 21:08:46 +00:00
|
|
|
// Ensure a deployment was created
|
2017-07-04 20:31:01 +00:00
|
|
|
if plan.Deployment == nil {
|
2017-06-06 21:08:46 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2017-07-04 20:31:01 +00:00
|
|
|
state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
2017-06-06 21:08:46 +00:00
|
|
|
if !ok {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
2017-06-06 21:08:46 +00:00
|
|
|
if state.DesiredTotal != 10 || state.DesiredCanaries != 0 {
|
|
|
|
t.Fatalf("bad: %#v", state)
|
2015-09-07 22:17:39 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-18 00:18:12 +00:00
|
|
|
// This tests that the old allocation is stopped before placing.
|
2017-07-20 19:23:40 +00:00
|
|
|
// It is critical to test that the updated job attempts to place more
|
|
|
|
// allocations as this allows us to assert that destructive changes are done
|
|
|
|
// first.
|
2017-07-18 00:18:12 +00:00
|
|
|
func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
2018-10-03 16:47:18 +00:00
|
|
|
// Create a node and clear the reserved resources
|
2017-07-18 00:18:12 +00:00
|
|
|
node := mock.Node()
|
2018-10-03 16:47:18 +00:00
|
|
|
node.ReservedResources = nil
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
2018-10-03 16:47:18 +00:00
|
|
|
// Create a resource ask that is the same as the resources available on the
|
|
|
|
// node
|
2018-10-04 21:33:09 +00:00
|
|
|
cpu := node.NodeResources.Cpu.CpuShares
|
2018-10-03 16:47:18 +00:00
|
|
|
mem := node.NodeResources.Memory.MemoryMB
|
|
|
|
|
|
|
|
request := &structs.Resources{
|
|
|
|
CPU: int(cpu),
|
|
|
|
MemoryMB: int(mem),
|
|
|
|
}
|
|
|
|
allocated := &structs.AllocatedResources{
|
|
|
|
Tasks: map[string]*structs.AllocatedTaskResources{
|
|
|
|
"web": {
|
|
|
|
Cpu: structs.AllocatedCpuResources{
|
|
|
|
CpuShares: cpu,
|
|
|
|
},
|
|
|
|
Memory: structs.AllocatedMemoryResources{
|
|
|
|
MemoryMB: mem,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Generate a fake job with one alloc that consumes the whole node
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
2018-10-03 16:47:18 +00:00
|
|
|
job.TaskGroups[0].Tasks[0].Resources = request
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
alloc := mock.Alloc()
|
2018-10-03 16:47:18 +00:00
|
|
|
alloc.AllocatedResources = allocated
|
2017-07-18 00:18:12 +00:00
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
2017-07-20 19:23:40 +00:00
|
|
|
// Update the job to place more versions of the task group, drop the count
|
|
|
|
// and force destructive updates
|
2017-07-18 00:18:12 +00:00
|
|
|
job2 := job.Copy()
|
2017-07-20 19:23:40 +00:00
|
|
|
job2.TaskGroups[0].Count = 5
|
2017-07-18 00:18:12 +00:00
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
2017-08-21 21:07:54 +00:00
|
|
|
MaxParallel: 5,
|
2017-07-18 00:18:12 +00:00
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
2018-10-03 16:47:18 +00:00
|
|
|
job2.TaskGroups[0].Tasks[0].Resources = mock.Job().TaskGroups[0].Tasks[0].Resources
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-07-18 00:18:12 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-07-18 00:18:12 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-07-18 00:18:12 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted only MaxParallel
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 1 {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), 1, plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
2018-10-03 16:47:18 +00:00
|
|
|
if len(planned) != 5 {
|
2017-07-18 00:18:12 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a deployment was created
|
|
|
|
if plan.Deployment == nil {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2018-10-03 16:47:18 +00:00
|
|
|
if state.DesiredTotal != 5 || state.DesiredCanaries != 0 {
|
2017-07-18 00:18:12 +00:00
|
|
|
t.Fatalf("bad: %#v", state)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-06 15:58:15 +00:00
|
|
|
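// TestServiceSched_JobModify_Canaries verifies that a destructive update with
// canaries evicts nothing, places only the canary allocations with the canary
// flag set, and records them in the resulting deployment.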
func TestServiceSched_JobModify_Canaries(t *testing.T) {
|
2015-09-07 19:27:12 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-09-07 19:27:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-07-06 15:58:15 +00:00
|
|
|
desiredUpdates := 2
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
Canary: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the task, such that it cannot be done in-place
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-07-06 15:58:15 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted nothing
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: got %d; want %d: %#v", len(update), 0, plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != desiredUpdates {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2018-04-19 20:58:06 +00:00
|
|
|
for _, canary := range planned {
|
|
|
|
if canary.DeploymentStatus == nil || !canary.DeploymentStatus.Canary {
|
|
|
|
t.Fatalf("expected canary field to be set on canary alloc %q", canary.ID)
|
|
|
|
}
|
|
|
|
}
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
// Check that the deployment id is attached to the eval
|
|
|
|
if h.Evals[0].DeploymentID == "" {
|
|
|
|
t.Fatalf("Eval not annotated with deployment id")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a deployment was created
|
|
|
|
if plan.Deployment == nil {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
2020-01-22 20:34:03 +00:00
|
|
|
|
|
|
|
// Ensure local state was not altered in scheduler
|
|
|
|
staleState, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name]
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
require.Equal(t, 0, len(staleState.PlacedCanaries))
|
|
|
|
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
|
|
|
|
// Grab the latest state
|
|
|
|
deploy, err := h.State.DeploymentByID(ws, plan.Deployment.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
state, ok := deploy.TaskGroups[job.TaskGroups[0].Name]
|
|
|
|
require.True(t, ok)
|
|
|
|
|
|
|
|
require.Equal(t, 10, state.DesiredTotal)
|
|
|
|
require.Equal(t, desiredUpdates, state.DesiredCanaries)
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Assert the canaries were added to the placed list
|
|
|
|
if len(state.PlacedCanaries) != desiredUpdates {
|
2020-01-22 20:34:03 +00:00
|
|
|
assert.Fail(t, "expected PlacedCanaries to equal desiredUpdates", state)
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
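// TestServiceSched_JobModify_InPlace verifies that a non-destructive job update
// updates allocations in place: nothing is evicted, reserved ports and devices
// are preserved, and the old deployment ID and health status are cleared.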
func TestServiceSched_JobModify_InPlace(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-07-06 15:58:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and create an older deployment
|
|
|
|
job := mock.Job()
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
2020-04-21 12:56:05 +00:00
|
|
|
taskName := job.TaskGroups[0].Tasks[0].Name
|
|
|
|
|
|
|
|
adr := structs.AllocatedDeviceResource{
|
|
|
|
Type: "gpu",
|
|
|
|
Vendor: "nvidia",
|
|
|
|
Name: "1080ti",
|
|
|
|
DeviceIDs: []string{uuid.Generate()},
|
|
|
|
}
|
|
|
|
|
2017-07-06 15:58:15 +00:00
|
|
|
// Create allocs that are part of the old deployment
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DeploymentID = d.ID
|
|
|
|
alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)}
|
2020-04-21 12:56:05 +00:00
|
|
|
alloc.AllocatedResources.Tasks[taskName].Devices = []*structs.AllocatedDeviceResource{&adr}
|
2017-07-06 15:58:15 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
|
|
|
desiredUpdates := 4
|
|
|
|
job2.TaskGroups[0].Update = &structs.UpdateStrategy{
|
|
|
|
MaxParallel: desiredUpdates,
|
|
|
|
HealthCheck: structs.UpdateStrategyHealthCheck_Checks,
|
|
|
|
MinHealthyTime: 10 * time.Second,
|
|
|
|
HealthyDeadline: 10 * time.Minute,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-09-07 19:27:12 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-09-07 19:27:12 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan did not evict any allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
if len(update) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan updated the existing allocs
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
for _, p := range planned {
|
|
|
|
if p.Job != job2 {
|
|
|
|
t.Fatalf("should update job")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-09-07 19:27:12 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-09-14 00:06:34 +00:00
|
|
|
|
2020-04-21 12:56:05 +00:00
|
|
|
// Verify the allocated networks and devices did not change
|
2017-12-08 22:50:06 +00:00
|
|
|
rp := structs.Port{Label: "admin", Value: 5000}
|
2015-09-14 00:06:34 +00:00
|
|
|
for _, alloc := range out {
|
2020-04-21 12:56:05 +00:00
|
|
|
for _, resources := range alloc.AllocatedResources.Tasks {
|
2015-11-15 06:28:11 +00:00
|
|
|
if resources.Networks[0].ReservedPorts[0] != rp {
|
2015-09-14 00:06:34 +00:00
|
|
|
t.Fatalf("bad: %#v", alloc)
|
|
|
|
}
|
2020-04-21 12:56:05 +00:00
|
|
|
			if len(resources.Devices) == 0 || !reflect.DeepEqual(resources.Devices[0], &adr) {
				t.Fatalf("bad: devices changed unexpectedly: %#v", alloc)
			}
|
2015-09-14 00:06:34 +00:00
|
|
|
}
|
|
|
|
}
|
2017-07-06 15:58:15 +00:00
|
|
|
|
|
|
|
// Verify the deployment id was changed and health cleared
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.DeploymentID == d.ID {
|
|
|
|
t.Fatalf("bad: deployment id not cleared")
|
|
|
|
} else if alloc.DeploymentStatus != nil {
|
|
|
|
t.Fatalf("bad: deployment status not cleared")
|
|
|
|
}
|
|
|
|
}
|
2015-09-07 19:27:12 +00:00
|
|
|
}
|
|
|
|
|
2019-10-23 22:23:16 +00:00
|
|
|
// TestServiceSched_JobModify_InPlace08 asserts that inplace updates of
|
|
|
|
// allocations created with Nomad 0.8 do not cause panics.
|
|
|
|
//
|
|
|
|
// COMPAT(0.11) - While we do not guarantee that upgrades from 0.8 -> 0.10
|
|
|
|
// (skipping 0.9) are safe, we do want to avoid panics in the scheduler which
|
|
|
|
// cause unrecoverable server outages with no chance of recovery.
|
|
|
|
//
|
|
|
|
// Safe to remove in 0.11.0 as no one should ever be trying to upgrade from 0.8
|
|
|
|
// to 0.11!
|
|
|
|
func TestServiceSched_JobModify_InPlace08(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Generate a fake job with 0.8 allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Create 0.8 alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job.Copy()
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.AllocatedResources = nil // 0.8 didn't have this
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Update the job inplace
|
|
|
|
job2 := job.Copy()
|
|
|
|
|
|
|
|
job2.TaskGroups[0].Tasks[0].Services[0].Tags[0] = "newtag"
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan did not evict any allocs
|
|
|
|
var update []*structs.Allocation
|
|
|
|
for _, updateList := range plan.NodeUpdate {
|
|
|
|
update = append(update, updateList...)
|
|
|
|
}
|
|
|
|
require.Zero(t, update)
|
|
|
|
|
|
|
|
// Ensure the plan updated the existing alloc
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Len(t, planned, 1)
|
|
|
|
for _, p := range planned {
|
|
|
|
require.Equal(t, job2, p.Job)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2019-10-23 22:23:16 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
require.Len(t, out, 1)
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
|
|
|
|
newAlloc := out[0]
|
|
|
|
|
|
|
|
// Verify AllocatedResources was set
|
|
|
|
require.NotNil(t, newAlloc.AllocatedResources)
|
|
|
|
}
|
|
|
|
|
2017-03-08 19:47:55 +00:00
|
|
|
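// TestServiceSched_JobModify_DistinctProperty verifies that growing a job
// constrained by distinct_property beyond the available property values places
// allocations on distinct nodes, reports the surplus as failed, and spawns a
// blocked eval.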
func TestServiceSched_JobModify_DistinctProperty(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
node.Meta["rack"] = fmt.Sprintf("rack%d", i)
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-03-08 19:47:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job that uses distinct property and has count higher than what is
|
|
|
|
// possible.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 11
|
|
|
|
job.Constraints = append(job.Constraints,
|
|
|
|
&structs.Constraint{
|
|
|
|
Operand: structs.ConstraintDistinctProperty,
|
|
|
|
LTarget: "${meta.rack}",
|
|
|
|
})
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
oldJob := job.Copy()
|
|
|
|
oldJob.JobModifyIndex -= 1
|
|
|
|
oldJob.TaskGroups[0].Count = 4
|
|
|
|
|
|
|
|
// Place 4 of 10
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 4; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = oldJob
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-03-08 19:47:55 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-03-08 19:47:55 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
if plan.Annotations != nil {
|
|
|
|
t.Fatalf("expected no annotations")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the eval has spawned a blocked eval for the unplaceable alloc
|
|
|
|
if len(h.CreateEvals) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.CreateEvals)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan failed to alloc
|
|
|
|
outEval := h.Evals[0]
|
|
|
|
if len(outEval.FailedTGAllocs) != 1 {
|
|
|
|
t.Fatalf("bad: %+v", outEval)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", planned)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-03-08 19:47:55 +00:00
|
|
|
|
|
|
|
// Ensure only 10 allocations were placed (one per distinct rack value)
|
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a different node was used for each allocation.
|
|
|
|
used := make(map[string]struct{})
|
|
|
|
for _, alloc := range out {
|
|
|
|
if _, ok := used[alloc.NodeID]; ok {
|
|
|
|
t.Fatalf("Node collision %v", alloc.NodeID)
|
|
|
|
}
|
|
|
|
used[alloc.NodeID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2019-12-03 14:14:49 +00:00
|
|
|
// TestServiceSched_JobModify_NodeReschedulePenalty ensures that
|
|
|
|
// a failing allocation gets rescheduled with a penalty to the old
|
|
|
|
// node, but an updated job doesn't apply the penalty.
|
|
|
|
func TestServiceSched_JobModify_NodeReschedulePenalty(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
|
|
|
require.NoError(h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
MaxDelay: 1 * time.Minute,
|
|
|
|
DelayFunction: "constant",
|
|
|
|
}
|
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
|
|
|
failedAlloc := allocs[1]
|
|
|
|
failedAllocID := failedAlloc.ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertAllocs(h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
// Create and process a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
// Ensure we have one plan
|
|
|
|
require.Equal(1, len(h.Plans))
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Verify that one new allocation got created with its restart tracker info
|
|
|
|
require.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
require.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
|
|
|
require.Equal(1, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
require.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID)
|
|
|
|
|
|
|
|
// Verify that the node-reschedule penalty was applied to the new alloc
|
|
|
|
for _, scoreMeta := range newAlloc.Metrics.ScoreMetaData {
|
|
|
|
if scoreMeta.NodeID == failedAlloc.NodeID {
|
|
|
|
require.Equal(-1.0, scoreMeta.Scores["node-reschedule-penalty"],
|
|
|
|
"eval to replace failed alloc missing node-reshedule-penalty: %v",
|
|
|
|
scoreMeta.Scores,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Update the job, such that it cannot be done in-place
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
|
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job2))
|
|
|
|
|
|
|
|
// Create and process a mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
// Lookup the new allocations by JobID
|
|
|
|
out, err = h.State.AllocsByJob(ws, job.Namespace, job2.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
|
|
|
require.Equal(2, len(out))
|
|
|
|
|
|
|
|
// No new allocs have node-reschedule-penalty
|
|
|
|
for _, alloc := range out {
|
|
|
|
require.Nil(alloc.RescheduleTracker)
|
|
|
|
require.NotNil(alloc.Metrics)
|
|
|
|
for _, scoreMeta := range alloc.Metrics.ScoreMetaData {
|
|
|
|
if scoreMeta.NodeID != failedAlloc.NodeID {
|
|
|
|
require.Equal(0.0, scoreMeta.Scores["node-reschedule-penalty"],
|
|
|
|
"eval for updated job should not include node-reshedule-penalty: %v",
|
|
|
|
scoreMeta.Scores,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-15 03:54:30 +00:00
|
|
|
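// TestServiceSched_JobDeregister_Purged verifies that deregistering a purged
// job evicts all of its allocations while keeping the job field populated on
// the stopped allocations.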
func TestServiceSched_JobDeregister_Purged(t *testing.T) {
|
2015-08-11 21:54:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2016-07-22 21:53:49 +00:00
|
|
|
for _, alloc := range allocs {
|
|
|
|
h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-11 21:54:21 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-11 21:54:21 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2016-01-14 20:57:43 +00:00
|
|
|
if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) {
|
2017-04-15 03:54:30 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Ensure that the job field on the allocation is still populated
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.Job == nil {
|
|
|
|
t.Fatalf("bad: %#v", alloc)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no remaining allocations
|
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
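// TestServiceSched_JobDeregister_Stopped verifies that deregistering a stopped
// job evicts all of its allocations and zeroes the queued counts in the job
// summary.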
func TestServiceSched_JobDeregister_Stopped(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
2018-06-13 17:46:39 +00:00
|
|
|
require := require.New(t)
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Stop = true
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(h.State.UpsertAllocs(h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
// Create a summary where the queued allocs are set as we want to assert
|
|
|
|
// they get zeroed out.
|
|
|
|
summary := mock.JobSummary(job.ID)
|
|
|
|
web := summary.Summary["web"]
|
|
|
|
web.Queued = 2
|
|
|
|
require.NoError(h.State.UpsertJobSummary(h.NextIndex(), summary))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-04-15 03:54:30 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-04-15 03:54:30 +00:00
|
|
|
}
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(h.Process(NewServiceScheduler, eval))
|
2017-04-15 03:54:30 +00:00
|
|
|
|
|
|
|
// Ensure a single plan
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Len(h.Plans, 1)
|
2017-04-15 03:54:30 +00:00
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"], len(allocs))
|
2015-08-11 21:54:21 +00:00
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NoError(err)
|
2015-08-11 21:54:21 +00:00
|
|
|
|
2016-02-24 22:50:59 +00:00
|
|
|
// Ensure that the job field on the allocation is still populated
|
|
|
|
for _, alloc := range out {
|
2018-06-13 17:46:39 +00:00
|
|
|
require.NotNil(alloc.Job)
|
2016-02-24 22:50:59 +00:00
|
|
|
}
|
|
|
|
|
2015-08-11 21:54:21 +00:00
|
|
|
// Ensure no remaining allocations
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2018-06-13 17:46:39 +00:00
|
|
|
require.Empty(out)
|
|
|
|
|
|
|
|
// Assert the job summary is cleared out
|
|
|
|
sout, err := h.State.JobSummaryByID(ws, job.Namespace, job.ID)
|
|
|
|
require.NoError(err)
|
|
|
|
require.NotNil(sout)
|
|
|
|
require.Contains(sout.Summary, "web")
|
|
|
|
webOut := sout.Summary["web"]
|
|
|
|
require.Zero(webOut.Queued)
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2015-08-11 21:54:21 +00:00
|
|
|
}
|
2015-08-14 01:51:08 +00:00
|
|
|
|
2016-07-28 00:49:53 +00:00
|
|
|
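// TestServiceSched_NodeDown verifies how allocations on a down node are handled
// depending on their desired and client status: migrated, rescheduled, marked
// lost, or left alone when already terminal.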
func TestServiceSched_NodeDown(t *testing.T) {
|
2020-01-06 20:56:31 +00:00
|
|
|
cases := []struct {
|
|
|
|
desired string
|
|
|
|
client string
|
|
|
|
migrate bool
|
|
|
|
reschedule bool
|
|
|
|
terminal bool
|
|
|
|
lost bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusStop,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
lost: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusPending,
|
|
|
|
migrate: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
migrate: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusLost,
|
|
|
|
terminal: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusComplete,
|
|
|
|
terminal: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusRun,
|
|
|
|
client: structs.AllocClientStatusFailed,
|
|
|
|
reschedule: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desired: structs.AllocDesiredStatusEvict,
|
|
|
|
client: structs.AllocClientStatusRunning,
|
|
|
|
lost: true,
|
|
|
|
},
|
2016-07-28 00:49:53 +00:00
|
|
|
}
|
2016-08-04 18:24:17 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
for i, tc := range cases {
|
|
|
|
t.Run(fmt.Sprintf(""), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Register a node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Status = structs.NodeStatusDown
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2019-06-06 19:50:23 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2018-02-21 18:58:04 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
alloc.DesiredStatus = tc.desired
|
|
|
|
alloc.ClientStatus = tc.client
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Mark for migration if necessary
|
|
|
|
alloc.DesiredTransition.Migrate = helper.BoolToPtr(tc.migrate)
|
2016-07-28 00:49:53 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
allocs := []*structs.Allocation{alloc}
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-08-03 22:45:42 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Create a mock evaluation to deal with the node going down
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-08-03 22:45:42 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
2019-06-06 19:50:23 +00:00
|
|
|
|
2020-01-06 20:56:31 +00:00
|
|
|
if tc.terminal {
|
|
|
|
// No plan for terminal state allocs
|
|
|
|
require.Len(t, h.Plans, 0)
|
2019-06-06 19:50:23 +00:00
|
|
|
} else {
|
2020-01-06 20:56:31 +00:00
|
|
|
require.Len(t, h.Plans, 1)
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
|
|
|
out := plan.NodeUpdate[node.ID]
|
|
|
|
require.Len(t, out, 1)
|
|
|
|
|
|
|
|
outAlloc := out[0]
|
|
|
|
if tc.migrate {
|
|
|
|
require.NotEqual(t, structs.AllocClientStatusLost, outAlloc.ClientStatus)
|
|
|
|
} else if tc.reschedule {
|
|
|
|
require.Equal(t, structs.AllocClientStatusFailed, outAlloc.ClientStatus)
|
|
|
|
} else if tc.lost {
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, outAlloc.ClientStatus)
|
|
|
|
} else {
|
|
|
|
require.Fail(t, "unexpected alloc update")
|
|
|
|
}
|
2019-06-06 19:50:23 +00:00
|
|
|
}
|
2020-01-06 20:56:31 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2019-06-06 19:50:23 +00:00
|
|
|
})
|
2016-07-28 00:49:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-13 20:39:04 +00:00
|
|
|
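// TestServiceSched_StopAfterClientDisconnect verifies that allocations on a
// down node are stopped and, depending on stop_after_client_disconnect and how
// long the client has been lost, either rescheduled via a blocked eval or
// deferred with a delayed follow-up eval.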
func TestServiceSched_StopAfterClientDisconnect(t *testing.T) {
|
|
|
|
cases := []struct {
|
|
|
|
stop time.Duration
|
|
|
|
when time.Time
|
|
|
|
rescheduled bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
rescheduled: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
rescheduled: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
when: time.Now().UTC().Add(-10 * time.Second),
|
|
|
|
rescheduled: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
stop: 1 * time.Second,
|
|
|
|
when: time.Now().UTC().Add(10 * time.Minute),
|
|
|
|
rescheduled: false,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, tc := range cases {
|
|
|
|
t.Run(fmt.Sprintf(""), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Node, which is down
|
|
|
|
node := mock.Node()
|
|
|
|
node.Status = structs.NodeStatusDown
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
|
|
|
|
// Job with allocations and stop_after_client_disconnect
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 1
|
|
|
|
job.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
|
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
|
|
|
|
|
|
|
// Alloc for the running group
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusRun
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
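// Record when the client was marked lost, if the test case specifies it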
if !tc.when.IsZero() {
|
|
|
|
alloc.AllocStates = []*structs.AllocState{{
|
|
|
|
Field: structs.AllocStateFieldClientStatus,
|
|
|
|
Value: structs.AllocClientStatusLost,
|
|
|
|
Time: tc.when,
|
|
|
|
}}
|
|
|
|
}
|
|
|
|
allocs := []*structs.Allocation{alloc}
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
evals := []*structs.Evaluation{{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeDrain,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}}
|
|
|
|
eval := evals[0]
|
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), evals))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, h.Evals[0].Status, structs.EvalStatusComplete)
|
|
|
|
require.Len(t, h.Plans, 1, "plan")
|
|
|
|
|
2020-06-03 13:48:38 +00:00
|
|
|
// One followup eval created, either delayed or blocked
|
|
|
|
require.Len(t, h.CreateEvals, 1)
|
2020-05-13 20:39:04 +00:00
|
|
|
e := h.CreateEvals[0]
|
|
|
|
require.Equal(t, eval.ID, e.PreviousEval)
|
|
|
|
|
|
|
|
if tc.rescheduled {
|
|
|
|
require.Equal(t, "blocked", e.Status)
|
|
|
|
} else {
|
|
|
|
require.Equal(t, "pending", e.Status)
|
|
|
|
require.NotEmpty(t, e.WaitUntil)
|
|
|
|
}
|
|
|
|
|
|
|
|
// The followup eval may still be in the process of being inserted into the state store, so wait for it
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
found, err := h.State.EvalByID(ws, e.ID)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if found == nil {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return true, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
alloc, err = h.State.AllocByID(ws, alloc.ID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Allocations have been transitioned to lost
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusStop, alloc.DesiredStatus)
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, alloc.ClientStatus)
|
|
|
|
// At least 1 alloc state is recorded, or 2 if tc.when was set manually
|
|
|
|
require.NotEmpty(t, alloc.AllocStates)
|
|
|
|
|
|
|
|
if tc.rescheduled {
|
|
|
|
// Register a new node, leave it up, process the followup eval
|
|
|
|
node = mock.Node()
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
return len(as) == 2, nil
|
|
|
|
}, func(err error) {
|
|
|
|
require.NoError(t, err)
|
|
|
|
})
|
|
|
|
|
|
|
|
a2 := as[0]
|
|
|
|
if a2.ID == alloc.ID {
|
|
|
|
a2 = as[1]
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Equal(t, structs.AllocClientStatusPending, a2.ClientStatus)
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusRun, a2.DesiredStatus)
|
|
|
|
require.Equal(t, node.ID, a2.NodeID)
|
|
|
|
|
|
|
|
// No blocked evals
|
|
|
|
require.Empty(t, h.ReblockEvals)
|
|
|
|
require.Len(t, h.CreateEvals, 1)
|
|
|
|
require.Equal(t, h.CreateEvals[0].ID, e.ID)
|
|
|
|
} else {
|
|
|
|
// No new alloc was created
|
|
|
|
as, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
require.Len(t, as, 1)
|
|
|
|
old := as[0]
|
|
|
|
|
|
|
|
require.Equal(t, alloc.ID, old.ID)
|
|
|
|
require.Equal(t, structs.AllocClientStatusLost, old.ClientStatus)
|
|
|
|
require.Equal(t, structs.AllocDesiredStatusStop, old.DesiredStatus)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-28 19:13:35 +00:00
|
|
|
func TestServiceSched_NodeUpdate(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Mark some allocs as running
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2016-07-28 19:13:35 +00:00
|
|
|
for i := 0; i < 4; i++ {
|
2017-02-08 05:22:48 +00:00
|
|
|
out, _ := h.State.AllocByID(ws, allocs[i].ID)
|
2016-07-28 19:13:35 +00:00
|
|
|
out.ClientStatus = structs.AllocClientStatusRunning
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out}))
|
2016-07-28 19:13:35 +00:00
|
|
|
}
|
|
|
|
|
2016-07-28 19:22:44 +00:00
|
|
|
// Create a mock evaluation which won't trigger any new placements
|
2016-07-28 19:13:35 +00:00
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-28 19:13:35 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-28 19:13:35 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-28 19:13:35 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-07-28 21:02:50 +00:00
|
|
|
if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
|
2016-07-28 19:13:35 +00:00
|
|
|
t.Fatalf("bad queued allocations: %v", h.Evals[0].QueuedAllocations)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2015-08-14 01:51:08 +00:00
|
|
|
func TestServiceSched_NodeDrain(t *testing.T) {
|
2015-08-14 05:11:32 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
|
|
|
node := mock.Node()
|
2015-09-07 02:47:02 +00:00
|
|
|
node.Drain = true
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-08-14 05:11:32 +00:00
|
|
|
}
|
|
|
|
|
2016-02-03 22:15:02 +00:00
|
|
|
// Generate a fake job with allocations and an update policy.
|
2015-08-14 05:11:32 +00:00
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
2015-09-07 02:47:02 +00:00
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2018-02-23 01:38:44 +00:00
|
|
|
alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
|
2015-08-14 05:11:32 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-14 05:11:32 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-14 05:11:32 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
2015-08-26 00:06:06 +00:00
|
|
|
if len(plan.NodeUpdate[node.ID]) != len(allocs) {
|
2015-08-14 05:11:32 +00:00
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-14 05:11:32 +00:00
|
|
|
|
|
|
|
// Ensure all allocations placed
|
2016-08-30 22:36:30 +00:00
|
|
|
out, _ = structs.FilterTerminalAllocs(out)
|
2015-08-14 05:11:32 +00:00
|
|
|
if len(out) != 10 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-08-09 21:48:25 +00:00
|
|
|
func TestServiceSched_NodeDrain_Down(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
|
|
|
node.Status = structs.NodeStatusDown
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Set the desired state of the allocs to stop
|
|
|
|
var stop []*structs.Allocation
|
2018-02-21 18:58:04 +00:00
|
|
|
for i := 0; i < 6; i++ {
|
2016-08-09 21:48:25 +00:00
|
|
|
newAlloc := allocs[i].Copy()
|
|
|
|
newAlloc.ClientStatus = structs.AllocDesiredStatusStop
|
2018-02-23 01:38:44 +00:00
|
|
|
newAlloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
|
2016-08-09 21:48:25 +00:00
|
|
|
stop = append(stop, newAlloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), stop))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Mark some of the allocations as running
|
|
|
|
var running []*structs.Allocation
|
|
|
|
for i := 4; i < 6; i++ {
|
|
|
|
newAlloc := stop[i].Copy()
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
running = append(running, newAlloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Mark some of the allocations as complete
|
|
|
|
var complete []*structs.Allocation
|
|
|
|
for i := 6; i < 10; i++ {
|
2018-02-21 18:58:04 +00:00
|
|
|
newAlloc := allocs[i].Copy()
|
2018-01-14 22:47:21 +00:00
|
|
|
newAlloc.TaskStates = make(map[string]*structs.TaskState)
|
|
|
|
newAlloc.TaskStates["web"] = &structs.TaskState{
|
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2016-08-09 21:48:25 +00:00
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
complete = append(complete, newAlloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete))
|
2016-08-09 21:48:25 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with the node update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-09 21:48:25 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-09 21:48:25 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-08-09 21:48:25 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted the non-terminal allocs
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 6 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that all the allocations which were in running or pending state
|
|
|
|
// have been marked as lost
|
|
|
|
var lostAllocs []string
|
|
|
|
for _, alloc := range plan.NodeUpdate[node.ID] {
|
|
|
|
lostAllocs = append(lostAllocs, alloc.ID)
|
|
|
|
}
|
|
|
|
sort.Strings(lostAllocs)
|
|
|
|
|
|
|
|
var expectedLostAllocs []string
|
|
|
|
for i := 0; i < 6; i++ {
|
|
|
|
expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID)
|
|
|
|
}
|
|
|
|
sort.Strings(expectedLostAllocs)
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-07-22 17:18:23 +00:00
|
|
|
func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
2018-02-23 01:38:44 +00:00
|
|
|
alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
|
2016-07-22 17:18:23 +00:00
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
node.Drain = true
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-22 17:18:23 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-22 17:18:23 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-22 17:18:23 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 2 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 2, queued)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-08-15 21:47:13 +00:00
|
|
|
func TestServiceSched_RetryLimit(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
h.Planner = &RejectPlan{h}
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2015-08-15 21:47:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2015-08-15 21:47:13 +00:00
|
|
|
|
2015-10-14 23:43:06 +00:00
|
|
|
// Create a mock evaluation to register the job
|
2015-08-15 21:47:13 +00:00
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2015-08-15 21:47:13 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2015-08-15 21:47:13 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2015-08-15 21:47:13 +00:00
|
|
|
|
|
|
|
// Ensure no allocations placed
|
|
|
|
if len(out) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should hit the retry limit
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusFailed)
|
2015-08-14 01:51:08 +00:00
|
|
|
}
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
func TestServiceSched_Reschedule_OnceNow(t *testing.T) {
|
2018-01-14 22:47:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
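// Allow a single reschedule attempt within a 15 minute window, with a constant 5s delay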
|
2018-03-02 00:23:44 +00:00
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Minute,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Verify that one new allocation was created with its reschedule tracker info
|
|
|
|
assert := assert.New(t)
|
|
|
|
assert.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
2018-01-17 19:22:30 +00:00
|
|
|
assert.Equal(1, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
assert.Equal(failedAllocID, newAlloc.RescheduleTracker.Events[0].PrevAllocID)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Mark this alloc as failed again, should not get rescheduled
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create another mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err = h.Process(NewServiceScheduler, eval)
|
|
|
|
assert.Nil(err)
|
|
|
|
// Verify no new allocs were created this time
|
|
|
|
out, err = h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
assert.Equal(3, len(out))
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
// Tests that an alloc that is reschedulable at a future time creates a follow-up eval
|
|
|
|
func TestServiceSched_Reschedule_Later(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(h.State.UpsertNode(h.NextIndex(), node))
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
delayDuration := 15 * time.Second
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
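// A single attempt with a constant delay, so the reschedule should happen via a delayed followup eval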
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
Delay: delayDuration,
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Minute,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now}}
|
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(err)
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Verify no new allocs were created
|
|
|
|
require.Equal(2, len(out))
|
|
|
|
|
|
|
|
// Verify follow up eval was created for the failed alloc
|
|
|
|
alloc, err := h.State.AllocByID(ws, failedAllocID)
|
|
|
|
require.Nil(err)
|
|
|
|
require.NotEmpty(alloc.FollowupEvalID)
|
|
|
|
|
|
|
|
// Ensure there is a follow up eval.
|
|
|
|
if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusPending {
|
|
|
|
t.Fatalf("bad: %#v", h.CreateEvals)
|
|
|
|
}
|
|
|
|
followupEval := h.CreateEvals[0]
|
|
|
|
require.Equal(now.Add(delayDuration), followupEval.WaitUntil)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestServiceSched_Reschedule_MultipleNow(t *testing.T) {
|
2018-01-14 22:47:21 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
maxRestartAttempts := 3
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
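// Allow up to maxRestartAttempts reschedules within a 30 minute window, with a constant 5s delay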
|
2018-03-02 00:23:44 +00:00
|
|
|
Attempts: maxRestartAttempts,
|
|
|
|
Interval: 30 * time.Minute,
|
|
|
|
Delay: 5 * time.Second,
|
2018-03-26 19:45:09 +00:00
|
|
|
DelayFunction: "constant",
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
expectedNumAllocs := 3
|
|
|
|
expectedNumReschedTrackers := 1
|
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
failedAllocId := allocs[1].ID
|
|
|
|
failedNodeID := allocs[1].NodeID
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
assert := assert.New(t)
|
|
|
|
for i := 0; i < maxRestartAttempts; i++ {
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Verify that a new allocation was created with its reschedule tracker info
|
|
|
|
assert.Equal(expectedNumAllocs, len(out))
|
|
|
|
|
|
|
|
// Find the new alloc with ClientStatusPending
|
|
|
|
var pendingAllocs []*structs.Allocation
|
2018-01-24 20:56:57 +00:00
|
|
|
var prevFailedAlloc *structs.Allocation
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ClientStatus == structs.AllocClientStatusPending {
|
|
|
|
pendingAllocs = append(pendingAllocs, alloc)
|
|
|
|
}
|
2018-01-24 20:56:57 +00:00
|
|
|
if alloc.ID == failedAllocId {
|
|
|
|
prevFailedAlloc = alloc
|
|
|
|
}
|
2018-01-14 22:47:21 +00:00
|
|
|
}
|
|
|
|
assert.Equal(1, len(pendingAllocs))
|
|
|
|
newAlloc := pendingAllocs[0]
|
2018-01-17 19:22:30 +00:00
|
|
|
assert.Equal(expectedNumReschedTrackers, len(newAlloc.RescheduleTracker.Events))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
// Verify the previous NodeID in the most recent reschedule event
|
|
|
|
reschedEvents := newAlloc.RescheduleTracker.Events
|
|
|
|
assert.Equal(failedAllocId, reschedEvents[len(reschedEvents)-1].PrevAllocID)
|
|
|
|
assert.Equal(failedNodeID, reschedEvents[len(reschedEvents)-1].PrevNodeID)
|
|
|
|
|
2018-01-24 20:56:57 +00:00
|
|
|
// Verify that the next alloc of the failed alloc is the newly rescheduled alloc
|
|
|
|
assert.Equal(newAlloc.ID, prevFailedAlloc.NextAllocation)
|
|
|
|
|
2018-01-14 22:47:21 +00:00
|
|
|
// Mark this alloc as failed again
|
|
|
|
newAlloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
newAlloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-12 * time.Second),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2018-01-14 22:47:21 +00:00
|
|
|
|
2018-01-24 15:33:55 +00:00
|
|
|
failedAllocId = newAlloc.ID
|
|
|
|
failedNodeID = newAlloc.NodeID
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc}))
|
2018-01-14 22:47:21 +00:00
|
|
|
|
|
|
|
// Create another mock evaluation
|
|
|
|
eval = &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-14 22:47:21 +00:00
|
|
|
expectedNumAllocs += 1
|
|
|
|
expectedNumReschedTrackers += 1
|
|
|
|
}
|
|
|
|
|
|
|
|
// Process last eval again, should not reschedule
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
assert.Nil(err)
|
|
|
|
|
|
|
|
// Verify no new allocs were created because restart attempts were exhausted
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-14 22:47:21 +00:00
|
|
|
assert.Equal(5, len(out)) // 2 original, plus 3 reschedule attempts
|
|
|
|
}
|
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
// Tests that old reschedule attempts are pruned
|
|
|
|
func TestServiceSched_Reschedule_PruneEvents(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2018-03-02 00:23:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations and an update policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
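// Unlimited reschedules with an exponentially growing delay, capped at one hour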
|
|
|
|
DelayFunction: "exponential",
|
2018-03-13 15:06:26 +00:00
|
|
|
MaxDelay: 1 * time.Hour,
|
2018-03-02 00:23:44 +00:00
|
|
|
Delay: 5 * time.Second,
|
|
|
|
Unlimited: true,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
now := time.Now()
|
|
|
|
// Mark allocations as failed with restart info
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{job.TaskGroups[0].Name: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-15 * time.Minute)}}
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
|
|
|
|
allocs[1].RescheduleTracker = &structs.RescheduleTracker{
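// Six prior reschedule events; only the most recent five should be carried over to the replacement alloc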
|
|
|
|
Events: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
|
|
|
|
PrevAllocID: uuid.Generate(),
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-40 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-30 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-20 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-10 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 80 * time.Second,
|
|
|
|
},
|
|
|
|
{RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
PrevAllocID: allocs[0].ID,
|
|
|
|
PrevNodeID: uuid.Generate(),
|
|
|
|
Delay: 160 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
expectedFirstRescheduleEvent := allocs[1].RescheduleTracker.Events[1]
|
|
|
|
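// The exponential delay function should double the last event's 160s delay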
expectedDelay := 320 * time.Second
|
|
|
|
failedAllocID := allocs[1].ID
|
|
|
|
successAllocID := allocs[0].ID
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure multiple plans
|
|
|
|
if len(h.Plans) == 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Verify that one new allocation was created with its reschedule tracker info
|
|
|
|
assert := assert.New(t)
|
|
|
|
assert.Equal(3, len(out))
|
|
|
|
var newAlloc *structs.Allocation
|
|
|
|
for _, alloc := range out {
|
|
|
|
if alloc.ID != successAllocID && alloc.ID != failedAllocID {
|
|
|
|
newAlloc = alloc
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
assert.Equal(failedAllocID, newAlloc.PreviousAllocation)
|
|
|
|
// Verify that the new alloc copied the last 5 reschedule attempts
|
|
|
|
assert.Equal(6, len(newAlloc.RescheduleTracker.Events))
|
|
|
|
assert.Equal(expectedFirstRescheduleEvent, newAlloc.RescheduleTracker.Events[0])
|
|
|
|
|
|
|
|
mostRecentRescheduleEvent := newAlloc.RescheduleTracker.Events[5]
|
|
|
|
// Verify that the failed alloc ID is in the most recent reschedule event
|
|
|
|
assert.Equal(failedAllocID, mostRecentRescheduleEvent.PrevAllocID)
|
|
|
|
// Verify that the delay value was captured correctly
|
|
|
|
assert.Equal(expectedDelay, mostRecentRescheduleEvent.Delay)
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
// Tests that deployments with failed allocs result in placements as long as the
|
|
|
|
// deployment is running.
|
|
|
|
func TestDeployment_FailedAllocs_Reschedule(t *testing.T) {
|
|
|
|
for _, failedDeployment := range []bool{false, true} {
|
|
|
|
t.Run(fmt.Sprintf("Failed Deployment: %v", failedDeployment), func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
require := require.New(t)
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(h.State.UpsertNode(h.NextIndex(), node))
|
2018-04-23 23:35:25 +00:00
|
|
|
}
|
2018-02-02 23:22:37 +00:00
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
// Generate a fake job with allocations and a reschedule policy.
|
|
|
|
job := mock.Job()
|
|
|
|
job.TaskGroups[0].Count = 2
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 1,
|
|
|
|
Interval: 15 * time.Minute,
|
|
|
|
}
|
|
|
|
jobIndex := h.NextIndex()
|
|
|
|
require.Nil(h.State.UpsertJob(jobIndex, job))
|
|
|
|
|
|
|
|
deployment := mock.Deployment()
|
|
|
|
deployment.JobID = job.ID
|
|
|
|
deployment.JobCreateIndex = jobIndex
|
|
|
|
deployment.JobVersion = job.Version
|
|
|
|
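// Optionally mark the deployment failed; a failed deployment should not produce placements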
if failedDeployment {
|
|
|
|
deployment.Status = structs.DeploymentStatusFailed
|
|
|
|
}
|
2018-02-02 23:22:37 +00:00
|
|
|
|
2018-04-23 23:35:25 +00:00
|
|
|
require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment))
|
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.DeploymentID = deployment.ID
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
// Mark one of the allocations as failed in the past
|
|
|
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
allocs[1].TaskStates = map[string]*structs.TaskState{"web": {State: "start",
|
|
|
|
StartedAt: time.Now().Add(-12 * time.Hour),
|
|
|
|
FinishedAt: time.Now().Add(-10 * time.Hour)}}
|
|
|
|
allocs[1].DesiredTransition.Reschedule = helper.BoolToPtr(true)
|
|
|
|
|
|
|
|
require.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs))
|
|
|
|
|
|
|
|
// Create a mock evaluation
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.Nil(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
require.Nil(h.Process(NewServiceScheduler, eval))
|
|
|
|
|
|
|
|
if failedDeployment {
|
|
|
|
// Verify no plan created
|
|
|
|
require.Len(h.Plans, 0)
|
|
|
|
} else {
|
|
|
|
require.Len(h.Plans, 1)
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
2018-02-02 23:22:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-04-12 23:14:32 +00:00
|
|
|
func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
|
2016-02-03 01:19:41 +00:00
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-02-03 01:19:41 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2016-04-12 23:14:32 +00:00
|
|
|
// Create a complete alloc
|
2016-02-03 01:19:41 +00:00
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
2016-03-24 01:08:19 +00:00
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-02-03 01:19:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-02-03 01:19:41 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan as it should be a no-op
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Ensure no new allocations were placed; only the original complete alloc remains
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBatchSched_Run_FailedAlloc(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-02-03 01:19:41 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
// Create a failed alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-02-03 01:19:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-02-03 01:19:41 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2016-03-21 21:17:37 +00:00
|
|
|
// Ensure a plan
|
2016-02-03 01:19:41 +00:00
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-02-03 01:19:41 +00:00
|
|
|
|
|
|
|
// Ensure a replacement alloc was placed.
|
|
|
|
if len(out) != 2 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
// Ensure that the scheduler is recording the correct number of queued
|
|
|
|
// allocations
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 0 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 1, queued)
|
|
|
|
}
|
|
|
|
|
2016-02-03 01:19:41 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2016-05-25 00:47:03 +00:00
|
|
|
|
2018-01-04 22:20:32 +00:00
|
|
|
func TestBatchSched_Run_LostAlloc(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.ID = "my-job"
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 3
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Desired = 3
|
|
|
|
// Mark one as lost and then schedule
|
|
|
|
// [(0, run, running), (1, run, running), (1, stop, lost)]
|
|
|
|
|
|
|
|
// Create two running allocations
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i <= 1; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the stopped alloc standing in for the lost one (desired stop, client complete)
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[1]"
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2018-01-04 22:20:32 +00:00
|
|
|
|
|
|
|
// Ensure a replacement alloc was placed.
|
|
|
|
if len(out) != 4 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert that we have the correct number of each alloc name
|
|
|
|
expected := map[string]int{
|
|
|
|
"my-job.web[0]": 1,
|
|
|
|
"my-job.web[1]": 2,
|
|
|
|
"my-job.web[2]": 1,
|
|
|
|
}
|
|
|
|
actual := make(map[string]int, 3)
|
|
|
|
for _, alloc := range out {
|
|
|
|
actual[alloc.Name] += 1
|
|
|
|
}
|
|
|
|
require.Equal(t, actual, expected)
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2017-09-14 21:00:33 +00:00
|
|
|
job.Type = structs.JobTypeBatch
|
2016-07-22 19:06:03 +00:00
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
2018-03-02 00:23:44 +00:00
|
|
|
tgName := job.TaskGroups[0].Name
|
|
|
|
now := time.Now()
|
|
|
|
|
2016-07-22 19:06:03 +00:00
|
|
|
// Create a failed alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusFailed
|
2018-03-02 00:23:44 +00:00
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{tgName: {State: "dead",
|
|
|
|
StartedAt: now.Add(-1 * time.Hour),
|
|
|
|
FinishedAt: now.Add(-10 * time.Second)}}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-07-22 19:06:03 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-07-22 19:06:03 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-07-22 19:06:03 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that the scheduler is recording the correct number of queued
|
|
|
|
// allocations
|
|
|
|
queued := h.Evals[0].QueuedAllocations["web"]
|
|
|
|
if queued != 1 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 1, queued)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-25 00:47:03 +00:00
|
|
|
func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a successfully finished
|
|
|
|
// alloc and a fresh undrained one
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
|
|
|
node2 := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a successful alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{
|
2017-09-26 22:26:33 +00:00
|
|
|
"web": {
|
2016-05-25 00:47:03 +00:00
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to rerun the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-05-25 00:47:03 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-05-25 00:47:03 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2016-05-25 00:47:03 +00:00
|
|
|
|
|
|
|
// Ensure no replacement alloc was placed.
|
|
|
|
if len(out) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// This test checks that terminal allocations that receive an in-place update
|
|
|
|
// are not added to the plan
|
|
|
|
func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a mock evaluation to trigger the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test ensures that terminal jobs from older versions are ignored.
|
|
|
|
func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
var nodes []*structs.Node
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
|
|
|
nodes = append(nodes, node)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Generate a fake job with allocations
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Update the job
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.ID = job.ID
|
2017-09-14 21:00:33 +00:00
|
|
|
job2.Type = structs.JobTypeBatch
|
|
|
|
job2.Version++
|
|
|
|
job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
allocs = nil
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job2
|
|
|
|
alloc.JobID = job2.ID
|
|
|
|
alloc.NodeID = nodes[i].ID
|
|
|
|
alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
|
|
|
alloc.TaskStates = map[string]*structs.TaskState{
|
2017-09-26 22:26:33 +00:00
|
|
|
"web": {
|
2017-09-14 21:00:33 +00:00
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
|
|
|
{
|
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
allocs = append(allocs, alloc)
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Create a mock evaluation to trigger the job update
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-03-12 01:19:22 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-03-12 01:19:22 +00:00
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test asserts that an allocation from an old job that is running on a
|
|
|
|
// drained node is cleaned up.
|
|
|
|
func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a running alloc and a
|
|
|
|
// fresh undrained one
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
|
|
|
node2 := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a running alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create an updated job
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
|
2018-02-21 18:58:04 +00:00
|
|
|
job2.Version++
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted 1
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan places 1
|
|
|
|
if len(plan.NodeAllocation[node2.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test asserts that an allocation from a job that is complete on a
|
|
|
|
// drained node is ignored.
|
|
|
|
func TestBatchSched_NodeDrain_Complete(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create two nodes, one that is drained and has a successfully finished
|
|
|
|
// alloc and a fresh undrained one
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
|
|
|
node2 := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node2))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a complete alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
2018-01-04 22:20:32 +00:00
|
|
|
alloc.TaskStates = make(map[string]*structs.TaskState)
|
|
|
|
alloc.TaskStates["web"] = &structs.TaskState{
|
|
|
|
State: structs.TaskStateDead,
|
|
|
|
Events: []*structs.TaskEvent{
|
2018-01-04 22:45:15 +00:00
|
|
|
{
|
2018-01-04 22:20:32 +00:00
|
|
|
Type: structs.TaskTerminated,
|
|
|
|
ExitCode: 0,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-03-12 01:19:22 +00:00
|
|
|
// Ensure no plan
|
|
|
|
if len(h.Plans) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2017-03-12 01:19:22 +00:00
|
|
|
}
|
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// This is a slightly odd test but it ensures that we handle a scale down of a
|
|
|
|
// task group's count and that it works even if all the allocs have the same
|
|
|
|
// name.
|
|
|
|
func TestBatchSched_ScaleDown_SameName(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.TaskGroups[0].Count = 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
scoreMetric := &structs.AllocMetric{
|
|
|
|
NodesEvaluated: 10,
|
|
|
|
NodesFiltered: 3,
|
|
|
|
ScoreMetaData: []*structs.NodeScoreMeta{
|
|
|
|
{
|
|
|
|
NodeID: node.ID,
|
|
|
|
Scores: map[string]float64{
|
|
|
|
"bin-packing": 0.5435,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
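// The metric above is attached to every existing allocation so the test can later
// verify that an in-place update carries the original scoring data forward.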
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a few running allocs
|
|
|
|
var allocs []*structs.Allocation
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Job = job
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
2019-03-13 04:36:46 +00:00
|
|
|
alloc.Metrics = scoreMetric
|
2017-09-14 21:00:33 +00:00
|
|
|
allocs = append(allocs, alloc)
|
2016-07-27 18:54:55 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
// Update the job's modify index to force an in-place upgrade
|
|
|
|
updatedJob := job.Copy()
|
|
|
|
updatedJob.JobModifyIndex = job.JobModifyIndex + 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), updatedJob))
|
2019-03-13 04:36:46 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-09-14 21:00:33 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-09-14 21:00:33 +00:00
|
|
|
}
|
2016-07-27 18:54:55 +00:00
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewBatchScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2016-08-30 22:36:30 +00:00
|
|
|
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure a plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h.Plans[0]
|
2016-08-30 22:36:30 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
require := require.New(t)
|
2017-09-14 21:00:33 +00:00
|
|
|
// Ensure the plan evicted 4 of the 5
|
2019-03-13 04:36:46 +00:00
|
|
|
require.Equal(4, len(plan.NodeUpdate[node.ID]))
|
2017-09-14 21:00:33 +00:00
|
|
|
|
2019-03-13 04:36:46 +00:00
|
|
|
// Ensure that the scheduler did not overwrite the original score metrics for the in-place updated allocs
|
|
|
|
for _, inPlaceAllocs := range plan.NodeAllocation {
|
|
|
|
for _, alloc := range inPlaceAllocs {
|
|
|
|
require.Equal(scoreMetric, alloc.Metrics)
|
|
|
|
}
|
|
|
|
}
|
2017-09-14 21:00:33 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
2016-07-27 18:54:55 +00:00
|
|
|
}
|
2016-08-16 00:52:41 +00:00
|
|
|
|
2020-01-07 19:48:05 +00:00
|
|
|
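// TestGenericSched_AllocFit exercises placement of a job whose task group mixes
// main, init (prestart), and sidecar tasks against a node with a fixed CPU budget.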
func TestGenericSched_AllocFit(t *testing.T) {
|
2020-01-09 19:34:46 +00:00
|
|
|
testCases := []struct {
|
|
|
|
Name string
|
|
|
|
NodeCpu int64
|
|
|
|
TaskResources structs.Resources
|
|
|
|
MainTaskCount int
|
|
|
|
InitTaskCount int
|
|
|
|
SideTaskCount int
|
|
|
|
ShouldPlaceAlloc bool
|
|
|
|
}{
|
|
|
|
{
|
|
|
|
Name: "simple init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 500,
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
MainTaskCount: 1,
|
|
|
|
InitTaskCount: 1,
|
|
|
|
SideTaskCount: 1,
|
|
|
|
ShouldPlaceAlloc: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "too big init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 700,
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
MainTaskCount: 1,
|
|
|
|
InitTaskCount: 1,
|
|
|
|
SideTaskCount: 1,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "many init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 100,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 3,
|
|
|
|
InitTaskCount: 5,
|
|
|
|
SideTaskCount: 5,
|
|
|
|
ShouldPlaceAlloc: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Name: "too many init + sidecar",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 100,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 10,
|
|
|
|
InitTaskCount: 10,
|
|
|
|
SideTaskCount: 10,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
2020-01-09 23:43:00 +00:00
|
|
|
{
|
|
|
|
Name: "too many too big",
|
|
|
|
NodeCpu: 1200,
|
|
|
|
TaskResources: structs.Resources{
|
|
|
|
CPU: 1000,
|
|
|
|
MemoryMB: 100,
|
|
|
|
},
|
|
|
|
MainTaskCount: 10,
|
|
|
|
InitTaskCount: 10,
|
|
|
|
SideTaskCount: 10,
|
|
|
|
ShouldPlaceAlloc: false,
|
|
|
|
},
|
2020-01-07 19:48:05 +00:00
|
|
|
}
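// The expectations above are consistent with the scheduler sizing an allocation by
// its peak concurrent demand rather than the sum of all tasks: with 500 CPU tasks
// on a 1200 CPU node, init+sidecar (1000) and main+sidecar (1000) both fit, while
// 700 CPU tasks do not (1400 > 1200). This reading is inferred from the test cases,
// not from the scheduler implementation itself.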
|
2020-01-09 19:34:46 +00:00
|
|
|
for _, testCase := range testCases {
|
|
|
|
t.Run(testCase.Name, func(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
node := mock.Node()
|
|
|
|
node.NodeResources.Cpu.CpuShares = testCase.NodeCpu
|
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Create a job with sidecar & init tasks
|
|
|
|
job := mock.VariableLifecycleJob(testCase.TaskResources, testCase.MainTaskCount, testCase.InitTaskCount, testCase.SideTaskCount)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.NoError(t, err)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
allocs := 0
|
|
|
|
if testCase.ShouldPlaceAlloc {
|
|
|
|
allocs = 1
|
|
|
|
}
|
|
|
|
// Ensure a plan is created only when an allocation should be placed
|
|
|
|
require.Len(t, h.Plans, allocs)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
// Ensure the expected number of allocations were placed
|
|
|
|
require.Len(t, out, allocs)
|
2020-01-07 19:48:05 +00:00
|
|
|
|
2020-01-09 19:34:46 +00:00
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
})
|
|
|
|
}
|
2020-01-07 19:48:05 +00:00
|
|
|
}
|
|
|
|
|
2016-08-16 00:52:41 +00:00
|
|
|
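// TestGenericSched_ChainedAlloc ensures that replacement allocations created by a
// destructive job update record their predecessor via PreviousAllocation
// ("chaining"), while allocations added purely by a count increase do not.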
func TestGenericSched_ChainedAlloc(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create some nodes
|
|
|
|
for i := 0; i < 10; i++ {
|
|
|
|
node := mock.Node()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Create a job
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2016-08-16 00:52:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-16 00:52:41 +00:00
|
|
|
Priority: job.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2016-08-16 00:52:41 +00:00
|
|
|
// Process the evaluation
|
|
|
|
if err := h.Process(NewServiceScheduler, eval); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
var allocIDs []string
|
|
|
|
for _, allocList := range h.Plans[0].NodeAllocation {
|
|
|
|
for _, alloc := range allocList {
|
|
|
|
allocIDs = append(allocIDs, alloc.ID)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(allocIDs)
|
|
|
|
|
|
|
|
// Create a new harness to invoke the scheduler again
|
|
|
|
h1 := NewHarnessWithState(t, h.State)
|
|
|
|
job1 := mock.Job()
|
|
|
|
job1.ID = job.ID
|
|
|
|
job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
|
|
|
|
job1.TaskGroups[0].Count = 12
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h1.State.UpsertJob(h1.NextIndex(), job1))
|
2016-08-16 00:52:41 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to update the job
|
|
|
|
eval1 := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-08-16 00:52:41 +00:00
|
|
|
Priority: job1.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job1.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-08-16 00:52:41 +00:00
|
|
|
}
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-08-16 00:52:41 +00:00
|
|
|
// Process the evaluation
|
|
|
|
if err := h1.Process(NewServiceScheduler, eval1); err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
plan := h1.Plans[0]
|
|
|
|
|
|
|
|
// Collect all the chained allocation ids and the new allocations which
|
|
|
|
// don't have any chained allocations
|
|
|
|
var prevAllocs []string
|
|
|
|
var newAllocs []string
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
for _, alloc := range allocList {
|
|
|
|
if alloc.PreviousAllocation == "" {
|
|
|
|
newAllocs = append(newAllocs, alloc.ID)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(prevAllocs)
|
|
|
|
|
2018-01-24 20:56:57 +00:00
|
|
|
// Ensure that the new allocations have their corresponding original
|
2016-08-16 00:52:41 +00:00
|
|
|
// allocation ids
|
|
|
|
if !reflect.DeepEqual(prevAllocs, allocIDs) {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the two new allocations don't have any chained allocations
|
|
|
|
if len(newAllocs) != 2 {
|
|
|
|
t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
|
|
|
|
}
|
|
|
|
}
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
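// TestServiceSched_NodeDrain_Sticky asserts that draining a node holding an
// allocation with a sticky ephemeral disk evicts the allocation without placing a
// replacement in the same plan, presumably because the sticky data ties the
// replacement to the original node.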
func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Register a draining node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Drain = true
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertNode(h.NextIndex(), node))
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
// Create an alloc on the draining node
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.Name = "my-job.web[0]"
|
|
|
|
alloc.NodeID = node.ID
|
|
|
|
alloc.Job.TaskGroups[0].Count = 1
|
|
|
|
alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
|
2018-02-23 01:38:44 +00:00
|
|
|
alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
|
|
|
|
require.NoError(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
|
2016-09-24 04:15:50 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deal with drain
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2016-09-24 04:15:50 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerNodeUpdate,
|
|
|
|
JobID: alloc.Job.ID,
|
|
|
|
NodeID: node.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2016-09-24 04:15:50 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2016-09-24 04:15:50 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan evicted all allocs
|
|
|
|
if len(plan.NodeUpdate[node.ID]) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't create any new allocations
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// This test ensures that when a job is stopped, the scheduler properly cancels
|
|
|
|
// an outstanding deployment.
|
|
|
|
func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job
|
|
|
|
job := mock.Job()
|
|
|
|
job.JobModifyIndex = job.CreateIndex + 1
|
|
|
|
job.ModifyIndex = job.CreateIndex + 1
|
|
|
|
job.Stop = true
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a deployment
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
|
|
|
d.JobCreateIndex = job.CreateIndex
|
|
|
|
d.JobModifyIndex = job.JobModifyIndex - 1
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to deregister the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-05-18 19:36:04 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobDeregister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-05-18 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan cancelled the existing deployment
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("No deployment for job")
|
|
|
|
}
|
|
|
|
if out.ID != d.ID {
|
|
|
|
t.Fatalf("Latest deployment for job is different than original deployment")
|
|
|
|
}
|
|
|
|
if out.Status != structs.DeploymentStatusCancelled {
|
|
|
|
t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
|
|
|
|
}
|
|
|
|
if out.StatusDescription != structs.DeploymentStatusDescriptionStoppedJob {
|
|
|
|
t.Fatalf("Deployment status description is %q, want %q",
|
|
|
|
out.StatusDescription, structs.DeploymentStatusDescriptionStoppedJob)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure the plan didn't allocate anything
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
|
|
|
|
|
|
|
// This test ensures that when a job is updated and has an old deployment, the scheduler properly cancels
|
|
|
|
// the deployment.
|
|
|
|
func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Generate a fake job
|
|
|
|
job := mock.Job()
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a deployment for an old version of the job
|
|
|
|
d := mock.Deployment()
|
|
|
|
d.JobID = job.ID
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertDeployment(h.NextIndex(), d))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Upsert again to bump job version
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertJob(h.NextIndex(), job))
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
// Create a mock evaluation to kick the job
|
|
|
|
eval := &structs.Evaluation{
|
2017-09-07 23:56:15 +00:00
|
|
|
Namespace: structs.DefaultNamespace,
|
2017-09-29 16:58:48 +00:00
|
|
|
ID: uuid.Generate(),
|
2017-05-18 19:36:04 +00:00
|
|
|
Priority: 50,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job.ID,
|
2017-12-18 20:55:36 +00:00
|
|
|
Status: structs.EvalStatusPending,
|
2017-05-18 19:36:04 +00:00
|
|
|
}
|
|
|
|
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
2017-12-18 20:55:36 +00:00
|
|
|
|
2017-05-18 19:36:04 +00:00
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure a single plan
|
|
|
|
if len(h.Plans) != 1 {
|
|
|
|
t.Fatalf("bad: %#v", h.Plans)
|
|
|
|
}
|
|
|
|
plan := h.Plans[0]
|
|
|
|
|
|
|
|
// Ensure the plan cancelled the existing deployment
|
|
|
|
ws := memdb.NewWatchSet()
|
2017-09-07 23:56:15 +00:00
|
|
|
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
|
2019-12-03 04:25:52 +00:00
|
|
|
require.NoError(t, err)
|
2017-05-18 19:36:04 +00:00
|
|
|
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("No deployment for job")
|
|
|
|
}
|
|
|
|
if out.ID != d.ID {
|
|
|
|
t.Fatalf("Latest deployment for job is different than original deployment")
|
|
|
|
}
|
|
|
|
if out.Status != structs.DeploymentStatusCancelled {
|
|
|
|
t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
|
|
|
|
}
|
|
|
|
if out.StatusDescription != structs.DeploymentStatusDescriptionNewerJob {
|
|
|
|
t.Fatalf("Deployment status description is %q, want %q",
|
|
|
|
out.StatusDescription, structs.DeploymentStatusDescriptionNewerJob)
|
|
|
|
}
|
|
|
|
// Ensure the plan didn't allocate anything
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
if len(planned) != 0 {
|
|
|
|
t.Fatalf("bad: %#v", plan)
|
|
|
|
}
|
|
|
|
|
|
|
|
h.AssertEvalStatus(t, structs.EvalStatusComplete)
|
|
|
|
}
|
2018-03-02 00:23:44 +00:00
|
|
|
|
|
|
|
// Various table-driven tests for carry forward
|
|
|
|
// of past reschedule events
|
|
|
|
func Test_updateRescheduleTracker(t *testing.T) {
|
|
|
|
|
|
|
|
t1 := time.Now().UTC()
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
prevAlloc := mock.Alloc()
|
|
|
|
|
|
|
|
type testCase struct {
|
|
|
|
desc string
|
|
|
|
prevAllocEvents []*structs.RescheduleEvent
|
|
|
|
reschedPolicy *structs.ReschedulePolicy
|
|
|
|
expectedRescheduleEvents []*structs.RescheduleEvent
|
|
|
|
reschedTime time.Time
|
|
|
|
}
|
|
|
|
|
|
|
|
testCases := []testCase{
|
|
|
|
{
|
2018-09-04 23:03:52 +00:00
|
|
|
desc: "No past events",
|
|
|
|
prevAllocEvents: nil,
|
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
|
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-02 00:23:44 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "one past event, linear delay",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second}},
|
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second},
|
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "one past event, fibonacci delay",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second}},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 24 * time.Hour, Attempts: 2, Delay: 5 * time.Second, DelayFunction: "fibonacci", MaxDelay: 60 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "eight past events, fibonacci delay, unlimited",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 15 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 25 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 65 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 105 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "fibonacci", MaxDelay: 240 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 15 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 25 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 65 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-1 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 105 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 170 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
desc: "old attempts past interval, exponential delay, limited",
|
|
|
|
prevAllocEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-2 * time.Hour).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 5 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
2018-03-08 00:44:54 +00:00
|
|
|
RescheduleTime: t1.Add(-70 * time.Minute).UnixNano(),
|
2018-03-02 00:23:44 +00:00
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 10 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-30 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-10 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
2018-03-13 15:06:26 +00:00
|
|
|
reschedPolicy: &structs.ReschedulePolicy{Unlimited: false, Interval: 1 * time.Hour, Attempts: 5, Delay: 5 * time.Second, DelayFunction: "exponential", MaxDelay: 240 * time.Second},
|
2018-03-02 00:23:44 +00:00
|
|
|
reschedTime: t1,
|
|
|
|
expectedRescheduleEvents: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-30 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 20 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.Add(-10 * time.Minute).UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 40 * time.Second,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: t1.UnixNano(),
|
|
|
|
PrevAllocID: prevAlloc.ID,
|
|
|
|
PrevNodeID: prevAlloc.NodeID,
|
|
|
|
Delay: 80 * time.Second,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
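// Taken together, the expectations above assume two behaviours of the tracker:
// only reschedule events that fall within the policy's Interval (or all of them
// when Unlimited) are carried forward, and the Delay of the newest event follows
// the policy's DelayFunction (constant, fibonacci, or exponential) capped at
// MaxDelay. These are inferences drawn from the table, not separate assertions.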
|
|
|
|
|
|
|
|
for _, tc := range testCases {
|
|
|
|
t.Run(tc.desc, func(t *testing.T) {
|
|
|
|
require := require.New(t)
|
|
|
|
prevAlloc.RescheduleTracker = &structs.RescheduleTracker{Events: tc.prevAllocEvents}
|
2018-03-08 15:36:01 +00:00
|
|
|
prevAlloc.Job.LookupTaskGroup(prevAlloc.TaskGroup).ReschedulePolicy = tc.reschedPolicy
|
|
|
|
updateRescheduleTracker(alloc, prevAlloc, tc.reschedTime)
|
2018-03-02 00:23:44 +00:00
|
|
|
require.Equal(tc.expectedRescheduleEvents, alloc.RescheduleTracker.Events)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2020-05-27 19:02:01 +00:00
|
|
|
|
|
|
|
func TestServiceSched_Preemption(t *testing.T) {
|
|
|
|
require := require.New(t)
|
|
|
|
h := NewHarness(t)
|
|
|
|
|
|
|
|
// Create a node
|
|
|
|
node := mock.Node()
|
|
|
|
node.Resources = nil
|
|
|
|
node.ReservedResources = nil
|
|
|
|
node.NodeResources = &structs.NodeResources{
|
|
|
|
Cpu: structs.NodeCpuResources{
|
|
|
|
CpuShares: 1000,
|
|
|
|
},
|
|
|
|
Memory: structs.NodeMemoryResources{
|
|
|
|
MemoryMB: 2048,
|
|
|
|
},
|
|
|
|
Disk: structs.NodeDiskResources{
|
|
|
|
DiskMB: 100 * 1024,
|
|
|
|
},
|
|
|
|
Networks: []*structs.NetworkResource{
|
|
|
|
{
|
2020-06-17 18:01:17 +00:00
|
|
|
Mode: "host",
|
2020-05-27 19:02:01 +00:00
|
|
|
Device: "eth0",
|
|
|
|
CIDR: "192.168.0.100/32",
|
|
|
|
MBits: 1000,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
node.ReservedResources = &structs.NodeReservedResources{
|
|
|
|
Cpu: structs.NodeReservedCpuResources{
|
|
|
|
CpuShares: 50,
|
|
|
|
},
|
|
|
|
Memory: structs.NodeReservedMemoryResources{
|
|
|
|
MemoryMB: 256,
|
|
|
|
},
|
|
|
|
Disk: structs.NodeReservedDiskResources{
|
|
|
|
DiskMB: 4 * 1024,
|
|
|
|
},
|
|
|
|
Networks: structs.NodeReservedNetworkResources{
|
|
|
|
ReservedHostPorts: "22",
|
|
|
|
},
|
|
|
|
}
|
|
|
|
require.NoError(h.State.UpsertNode(h.NextIndex(), node))
|
|
|
|
|
|
|
|
// Create a couple of jobs and schedule them
|
|
|
|
job1 := mock.Job()
|
|
|
|
job1.TaskGroups[0].Count = 1
|
|
|
|
job1.Priority = 30
|
|
|
|
r1 := job1.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r1.CPU = 500
|
|
|
|
r1.MemoryMB = 1024
|
|
|
|
r1.Networks = nil
|
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job1))
|
|
|
|
|
|
|
|
job2 := mock.Job()
|
|
|
|
job2.TaskGroups[0].Count = 1
|
|
|
|
job2.Priority = 50
|
|
|
|
r2 := job2.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r2.CPU = 350
|
|
|
|
r2.MemoryMB = 512
|
|
|
|
r2.Networks = nil
|
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job2))
|
|
|
|
|
|
|
|
// Create a mock evaluation to register the jobs
|
|
|
|
eval1 := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job1.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job1.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
eval2 := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job2.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job2.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1, eval2}))
|
|
|
|
|
|
|
|
expectedPreemptedAllocs := make(map[string]struct{})
|
|
|
|
// Process the two evals for job1 and job2 and make sure they allocated
|
|
|
|
for index, eval := range []*structs.Evaluation{eval1, eval2} {
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
plan := h.Plans[index]
|
|
|
|
|
|
|
|
// Ensure the plan doesn't have annotations.
|
|
|
|
require.Nil(plan.Annotations)
|
|
|
|
|
|
|
|
// Ensure the eval has no spawned blocked eval
|
|
|
|
require.Equal(0, len(h.CreateEvals))
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Equal(1, len(planned))
|
|
|
|
expectedPreemptedAllocs[planned[0].ID] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a higher priority job
|
|
|
|
job3 := mock.Job()
|
|
|
|
job3.Priority = 100
|
|
|
|
job3.TaskGroups[0].Count = 1
|
|
|
|
r3 := job3.TaskGroups[0].Tasks[0].Resources
|
|
|
|
r3.CPU = 900
|
|
|
|
r3.MemoryMB = 1700
|
|
|
|
r3.Networks = nil
|
|
|
|
require.NoError(h.State.UpsertJob(h.NextIndex(), job3))
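// After node reservations (50 CPU, 256 MB) the node has roughly 950 CPU and
// 1792 MB usable; job1 and job2 consume 850 CPU / 1536 MB, so job3's request of
// 900 CPU / 1700 MB can only be satisfied by preempting both lower-priority
// allocations. (These figures are derived from the fixtures above.)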
|
|
|
|
|
|
|
|
// Create a mock evaluation to register the job
|
|
|
|
eval := &structs.Evaluation{
|
|
|
|
Namespace: structs.DefaultNamespace,
|
|
|
|
ID: uuid.Generate(),
|
|
|
|
Priority: job3.Priority,
|
|
|
|
TriggeredBy: structs.EvalTriggerJobRegister,
|
|
|
|
JobID: job3.ID,
|
|
|
|
Status: structs.EvalStatusPending,
|
|
|
|
}
|
|
|
|
|
|
|
|
require.NoError(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
|
|
|
|
|
|
|
|
// Process the evaluation
|
|
|
|
err := h.Process(NewServiceScheduler, eval)
|
|
|
|
require.Nil(err)
|
|
|
|
|
|
|
|
// New plan should be the third one in the harness
|
|
|
|
plan := h.Plans[2]
|
|
|
|
|
|
|
|
// Ensure the eval has no spawned blocked eval
|
|
|
|
require.Equal(0, len(h.CreateEvals))
|
|
|
|
|
|
|
|
// Ensure the plan allocated
|
|
|
|
var planned []*structs.Allocation
|
|
|
|
for _, allocList := range plan.NodeAllocation {
|
|
|
|
planned = append(planned, allocList...)
|
|
|
|
}
|
|
|
|
require.Equal(1, len(planned))
|
|
|
|
|
|
|
|
// Lookup the allocations by JobID
|
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
out, err := h.State.AllocsByJob(ws, job3.Namespace, job3.ID, false)
|
|
|
|
require.NoError(err)
|
|
|
|
|
|
|
|
// Ensure all allocations placed
|
|
|
|
require.Equal(1, len(out))
|
|
|
|
actualPreemptedAllocs := make(map[string]struct{})
|
|
|
|
for _, id := range out[0].PreemptedAllocations {
|
|
|
|
actualPreemptedAllocs[id] = struct{}{}
|
|
|
|
}
|
|
|
|
require.Equal(expectedPreemptedAllocs, actualPreemptedAllocs)
|
|
|
|
}
|