From 25cb7fc03dfcd1113779fbd75b5f9cc00ffa14dc Mon Sep 17 00:00:00 2001
From: Alex Dadgar
Date: Wed, 3 Feb 2016 21:22:18 -0800
Subject: [PATCH] Fix computed class when the job has multiple task groups

When a job contains multiple task groups, the feasibility results the
stacks cache per computed node class were shared across task groups, so
a result computed against one group's constraints could be reused for a
sibling group with different constraints. Tell the wrapped feasibility
checks which task group is being placed so cached computed-class results
are tracked per task group. Also adds a TaskGroup.Copy helper and a
regression test that registers a job with one feasible and one
infeasible task group.
---
 nomad/structs/structs.go        | 10 +++++
 scheduler/generic_sched_test.go | 69 +++++++++++++++++++++++++++++++++
 scheduler/stack.go              |  2 +
 3 files changed, 81 insertions(+)

diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go
index dc236d618..f2e5e356b 100644
--- a/nomad/structs/structs.go
+++ b/nomad/structs/structs.go
@@ -1245,6 +1245,16 @@ type TaskGroup struct {
 	Meta map[string]string
 }
 
+// Copy returns a deep copy of the task group.
+func (tg *TaskGroup) Copy() *TaskGroup {
+	i, err := copystructure.Copy(tg)
+	if err != nil {
+		panic(err)
+	}
+
+	return i.(*TaskGroup)
+}
+
 // InitFields is used to initialize fields in the TaskGroup.
 func (tg *TaskGroup) InitFields(job *Job) {
 	// Set the default restart policy.
diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go
index 55f2189d2..e0506b8a9 100644
--- a/scheduler/generic_sched_test.go
+++ b/scheduler/generic_sched_test.go
@@ -224,6 +224,75 @@ func TestServiceSched_JobRegister_BlockedEval(t *testing.T) {
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
 
+func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
+	h := NewHarness(t)
+
+	// Create one node
+	node := mock.Node()
+	node.NodeClass = "class_0"
+	noErr(t, node.ComputeClass())
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+
+	// Create a job that constrains on a node class
+	job := mock.Job()
+	job.TaskGroups[0].Count = 2
+	job.TaskGroups[0].Constraints = append(job.Constraints,
+		&structs.Constraint{
+			LTarget: "$node.class",
+			RTarget: "class_0",
+			Operand: "=",
+		},
+	)
+	tg2 := job.TaskGroups[0].Copy()
+	tg2.Name = "web2"
+	tg2.Constraints[1].RTarget = "class_1"
+	job.TaskGroups = append(job.TaskGroups, tg2)
+	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+
+	// Create a mock evaluation to register the job
+	eval := &structs.Evaluation{
+		ID:          structs.GenerateUUID(),
+		Priority:    job.Priority,
+		TriggeredBy: structs.EvalTriggerJobRegister,
+		JobID:       job.ID,
+	}
+
+	// Process the evaluation
+	err := h.Process(NewServiceScheduler, eval)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure a single plan
+	if len(h.Plans) != 1 {
+		t.Fatalf("bad: %#v", h.Plans)
+	}
+	plan := h.Plans[0]
+
+	// Ensure the plan allocated
+	var planned []*structs.Allocation
+	for _, allocList := range plan.NodeAllocation {
+		planned = append(planned, allocList...)
+	}
+	if len(planned) != 2 {
+		t.Fatalf("bad: %#v", plan)
+	}
+	if len(plan.FailedAllocs) != 1 {
+		t.Fatalf("bad: %#v", plan)
+	}
+
+	// Look up the allocations by JobID
+	out, err := h.State.AllocsByJob(job.ID)
+	noErr(t, err)
+
+	// Ensure all allocations placed
+	if len(out) != 3 {
+		t.Fatalf("bad: %#v", out)
+	}
+
+	h.AssertEvalStatus(t, structs.EvalStatusComplete)
+}
+
 func TestServiceSched_JobModify(t *testing.T) {
 	h := NewHarness(t)
 
diff --git a/scheduler/stack.go b/scheduler/stack.go
index ccbf14e28..98681980e 100644
--- a/scheduler/stack.go
+++ b/scheduler/stack.go
@@ -153,6 +153,7 @@ func (s *GenericStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Reso
 	s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
 	s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
 	s.proposedAllocConstraint.SetTaskGroup(tg)
+	s.wrappedChecks.SetTaskGroup(tg.Name)
 	s.binPack.SetTasks(tg.Tasks)
 
 	// Find the node with the max score
@@ -242,6 +243,7 @@ func (s *SystemStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Resou
 	s.taskGroupDrivers.SetDrivers(tgConstr.drivers)
 	s.taskGroupConstraint.SetConstraints(tgConstr.constraints)
 	s.binPack.SetTasks(tg.Tasks)
+	s.wrappedChecks.SetTaskGroup(tg.Name)
 
 	// Get the next option that satisfies the constraints.
 	option := s.binPack.Next()
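
A note on the stack.go change, for readers outside the scheduler: the stacks
cache feasibility results per computed node class, and before this patch that
cache was not partitioned by task group, so a result computed for one group's
constraints could be replayed for a sibling group. The sketch below models
only that keying decision. It is a hypothetical, self-contained stand-in, not
Nomad's actual FeasibilityWrapper API; eligibilityCache, Eligible, and the
string key are invented for illustration, though SetTaskGroup mirrors the
call the patch adds to both stacks.

package main

import "fmt"

// eligibilityCache is a hypothetical stand-in for the feasibility cache the
// stacks consult. Results are keyed by (computed node class, task group):
// keying by class alone is exactly the bug this patch fixes, because one
// task group's cached answer would then be reused for a sibling group with
// different constraints.
type eligibilityCache struct {
	taskGroup string
	results   map[string]bool
}

func newEligibilityCache() *eligibilityCache {
	return &eligibilityCache{results: make(map[string]bool)}
}

// SetTaskGroup mirrors the call the patch adds to both stacks: it records
// which task group is currently being placed.
func (c *eligibilityCache) SetTaskGroup(name string) { c.taskGroup = name }

// Eligible returns the cached feasibility of the current task group on the
// given computed class, invoking check and storing the result on a miss.
func (c *eligibilityCache) Eligible(class string, check func() bool) bool {
	key := class + "|" + c.taskGroup
	if v, ok := c.results[key]; ok {
		return v
	}
	v := check()
	c.results[key] = v
	return v
}

func main() {
	cache := newEligibilityCache()

	// "web" wants class_0 and is feasible there.
	cache.SetTaskGroup("web")
	fmt.Println(cache.Eligible("class_0", func() bool { return true })) // true

	// "web2" wants class_1, so class_0 is infeasible for it. With the task
	// group in the key this is a fresh cache entry; without it, the stale
	// true cached for "web" would be returned and the check skipped.
	cache.SetTaskGroup("web2")
	fmt.Println(cache.Eligible("class_0", func() bool { return false })) // false
}

In the patch itself the same effect comes from calling
s.wrappedChecks.SetTaskGroup(tg.Name) in both GenericStack.Select and
SystemStack.Select before options are evaluated for the group, which the
new TestServiceSched_JobRegister_FeasibleAndInfeasibleTG exercises with one
feasible ("web") and one infeasible ("web2") task group on the same node.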