From d930d488b5c62dd747a7b6f7bb7b8f65cddcb4ec Mon Sep 17 00:00:00 2001
From: Alex Dadgar
Date: Wed, 3 Feb 2016 12:00:43 -0800
Subject: [PATCH 1/2] Fix node drain

---
 scheduler/generic_sched.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scheduler/generic_sched.go b/scheduler/generic_sched.go
index 1fc294cf6..0339b0244 100644
--- a/scheduler/generic_sched.go
+++ b/scheduler/generic_sched.go
@@ -272,7 +272,7 @@ func (s *GenericScheduler) computeJobAllocs() error {
 	s.limitReached = evictAndPlace(s.ctx, diff, diff.migrate, allocMigrating, &limit)
 
 	// Treat non in-place updates as an eviction and new placement.
-	s.limitReached = evictAndPlace(s.ctx, diff, diff.update, allocUpdating, &limit)
+	s.limitReached = s.limitReached || evictAndPlace(s.ctx, diff, diff.update, allocUpdating, &limit)
 
 	// Nothing remaining to do if placement is not required
 	if len(diff.place) == 0 {

From 4e527b26b0f0d32570e2f201e97daaf47cacaf94 Mon Sep 17 00:00:00 2001
From: Alex Dadgar
Date: Wed, 3 Feb 2016 14:15:02 -0800
Subject: [PATCH 2/2] test

---
 scheduler/generic_sched_test.go | 80 ++++++++++++++++++++++++++++++++-
 1 file changed, 79 insertions(+), 1 deletion(-)

diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go
index e3711ae24..55f2189d2 100644
--- a/scheduler/generic_sched_test.go
+++ b/scheduler/generic_sched_test.go
@@ -588,7 +588,7 @@ func TestServiceSched_NodeDrain(t *testing.T) {
 		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
 	}
 
-	// Generate a fake job with allocations
+	// Generate a fake job with allocations and an update policy.
 	job := mock.Job()
 	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
 
@@ -651,6 +651,84 @@ func TestServiceSched_NodeDrain(t *testing.T) {
 	h.AssertEvalStatus(t, structs.EvalStatusComplete)
 }
 
+func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) {
+	h := NewHarness(t)
+
+	// Register a draining node
+	node := mock.Node()
+	node.Drain = true
+	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+
+	// Create some nodes
+	for i := 0; i < 10; i++ {
+		node := mock.Node()
+		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
+	}
+
+	// Generate a fake job with allocations and an update policy.
+	job := mock.Job()
+	mp := 5
+	job.Update = structs.UpdateStrategy{
+		Stagger:     time.Second,
+		MaxParallel: mp,
+	}
+	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
+
+	var allocs []*structs.Allocation
+	for i := 0; i < 10; i++ {
+		alloc := mock.Alloc()
+		alloc.Job = job
+		alloc.JobID = job.ID
+		alloc.NodeID = node.ID
+		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
+		allocs = append(allocs, alloc)
+	}
+	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
+
+	// Create a mock evaluation to deal with drain
+	eval := &structs.Evaluation{
+		ID:          structs.GenerateUUID(),
+		Priority:    50,
+		TriggeredBy: structs.EvalTriggerNodeUpdate,
+		JobID:       job.ID,
+		NodeID:      node.ID,
+	}
+
+	// Process the evaluation
+	err := h.Process(NewServiceScheduler, eval)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// Ensure a single plan
+	if len(h.Plans) != 1 {
+		t.Fatalf("bad: %#v", h.Plans)
+	}
+	plan := h.Plans[0]
+
+	// Ensure the plan only evicted MaxParallel allocs
+	if len(plan.NodeUpdate[node.ID]) != mp {
+		t.Fatalf("bad: %#v", plan)
+	}
+
+	// Ensure the plan only placed MaxParallel allocs
+	var planned []*structs.Allocation
+	for _, allocList := range plan.NodeAllocation {
+		planned = append(planned, allocList...)
+	}
+	if len(planned) != mp {
+		t.Fatalf("bad: %#v", plan)
+	}
+
+	// Ensure there is a followup eval.
+	if len(h.CreateEvals) != 1 ||
+		h.CreateEvals[0].TriggeredBy != structs.EvalTriggerRollingUpdate {
+		t.Fatalf("bad: %#v", h.CreateEvals)
+	}
+
+	h.AssertEvalStatus(t, structs.EvalStatusComplete)
+}
+
 func TestServiceSched_RetryLimit(t *testing.T) {
 	h := NewHarness(t)
 	h.Planner = &RejectPlan{h}