Fixed some more tests
parent de2c79f421
commit 8f0d2a2775
@@ -64,6 +64,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
 	alloc2 := &structs.Allocation{
 		ID: structs.GenerateUUID(),
@@ -76,7 +77,10 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
+	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
+	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
 	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))

 	// Add a planned eviction to alloc1
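A note on the test fixes above and repeated throughout this commit: each affected test now upserts a mock job summary (at a lower state index) before upserting the allocations, and gives the allocations an explicit ClientStatus. A hedged reading: the state store's per-job summary accounting appears to expect a summary to exist when allocations referencing the job are written, so the setup order matters. The pattern, in a minimal sketch using the same calls and indexes as the hunk above:

	// Sketch only: summaries first, then the allocations that reference them.
	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))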
@@ -247,22 +247,7 @@ func (s *GenericScheduler) process() (bool, error) {

 	// Decrement the number of allocations pending per task group based on the
 	// number of allocations successfully placed
-	if result != nil {
-		for _, allocations := range result.NodeAllocation {
-			for _, allocation := range allocations {
-				// Ensure that the allocation is newly created
-				if allocation.CreateIndex != result.AllocIndex {
-					continue
-				}
-
-				if _, ok := s.queuedAllocs[allocation.TaskGroup]; ok {
-					s.queuedAllocs[allocation.TaskGroup] -= 1
-				} else {
-					s.logger.Printf("[ERR] sched: allocation %q placed but not in list of unplaced allocations", allocation.TaskGroup)
-				}
-			}
-		}
-	}
+	adjustQueuedAllocations(s.logger, result, s.queuedAllocs)

 	// If we got a state refresh, try again since we have stale data
 	if newState != nil {
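The inline bookkeeping removed above (and, identically, from SystemScheduler.process below) now lives in the shared adjustQueuedAllocations helper introduced in the last hunk of this commit. A minimal sketch of the behavior being preserved, assuming only the fields this diff already references (PlanResult.NodeAllocation, PlanResult.AllocIndex, Allocation.CreateIndex, Allocation.TaskGroup); the node key and index values are illustrative:

	// Assumes imports "log" and "os" plus the nomad structs package.
	logger := log.New(os.Stderr, "", log.LstdFlags)
	queued := map[string]int{"web": 2}
	result := &structs.PlanResult{
		AllocIndex: 1000,
		NodeAllocation: map[string][]*structs.Allocation{
			"node-1": {
				{TaskGroup: "web", CreateIndex: 1000}, // placed by this plan: decremented
				{TaskGroup: "web", CreateIndex: 900},  // predates the plan: skipped
			},
		},
	}
	adjustQueuedAllocations(logger, result, queued)
	// queued["web"] is now 1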
@@ -1118,6 +1118,9 @@ func TestServiceSched_JobDeregister(t *testing.T) {
 		alloc.JobID = job.ID
 		allocs = append(allocs, alloc)
 	}
+	for _, alloc := range allocs {
+		h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID))
+	}
 	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

 	// Create a mock evaluation to deregister the job
@@ -204,6 +204,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
 	alloc2 := &structs.Allocation{
 		ID: structs.GenerateUUID(),
@@ -216,7 +217,10 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
+	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
+	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
 	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))

 	task := &structs.Task{
@@ -280,6 +284,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
 	alloc2 := &structs.Allocation{
 		ID: structs.GenerateUUID(),
@@ -292,7 +297,10 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		ClientStatus:  structs.AllocClientStatusPending,
 		TaskGroup:     "web",
 	}
+	noErr(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
+	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
 	noErr(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))

 	// Add a planned eviction to alloc1
@@ -149,22 +149,7 @@ func (s *SystemScheduler) process() (bool, error) {

 	// Decrement the number of allocations pending per task group based on the
 	// number of allocations successfully placed
-	if result != nil {
-		for _, allocations := range result.NodeAllocation {
-			for _, allocation := range allocations {
-				// Ensure that the allocation is newly created
-				if allocation.CreateIndex != result.AllocIndex {
-					continue
-				}
-
-				if _, ok := s.queuedAllocs[allocation.TaskGroup]; ok {
-					s.queuedAllocs[allocation.TaskGroup] -= 1
-				} else {
-					s.logger.Printf("[ERR] sched: allocation %q placed but not in list of unplaced allocations", allocation.TaskGroup)
-				}
-			}
-		}
-	}
+	adjustQueuedAllocations(s.logger, result, s.queuedAllocs)

 	// If we got a state refresh, try again since we have stale data
 	if newState != nil {
@@ -644,6 +644,9 @@ func TestSystemSched_JobDeregister(t *testing.T) {
 		alloc.Name = "my-job.web[0]"
 		allocs = append(allocs, alloc)
 	}
+	for _, alloc := range allocs {
+		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
+	}
 	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

 	// Create a mock evaluation to deregister the job
@@ -596,3 +596,24 @@ func desiredUpdates(diff *diffResult, inplaceUpdates,

 	return desiredTgs
 }
+
+// adjustQueuedAllocations decrements the number of allocations pending per task
+// group based on the number of allocations successfully placed
+func adjustQueuedAllocations(logger *log.Logger, result *structs.PlanResult, queuedAllocs map[string]int) {
+	if result != nil {
+		for _, allocations := range result.NodeAllocation {
+			for _, allocation := range allocations {
+				// Ensure that the allocation is newly created
+				if allocation.CreateIndex != result.AllocIndex {
+					continue
+				}
+
+				if _, ok := queuedAllocs[allocation.TaskGroup]; ok {
+					queuedAllocs[allocation.TaskGroup] -= 1
+				} else {
+					logger.Printf("[ERR] sched: allocation %q placed but not in list of unplaced allocations", allocation.TaskGroup)
+				}
+			}
+		}
+	}
+}
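A hypothetical unit test for the new helper, not part of this commit, sketching the two branches the scheduler call sites do not exercise on the happy path; it assumes the fields shown above plus standard library imports (testing, log, io/ioutil) and the nomad structs package:

	func TestAdjustQueuedAllocations_Sketch(t *testing.T) {
		logger := log.New(ioutil.Discard, "", 0)

		// A nil plan result must leave the map untouched.
		queued := map[string]int{"web": 1}
		adjustQueuedAllocations(logger, nil, queued)
		if queued["web"] != 1 {
			t.Fatalf("want 1, got %d", queued["web"])
		}

		// An allocation whose task group was never queued is only logged,
		// not decremented; known groups are unaffected.
		result := &structs.PlanResult{
			AllocIndex: 7,
			NodeAllocation: map[string][]*structs.Allocation{
				"node-1": {{TaskGroup: "cache", CreateIndex: 7}},
			},
		}
		adjustQueuedAllocations(logger, result, queued)
		if queued["web"] != 1 {
			t.Fatalf("want 1, got %d", queued["web"])
		}
	}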
@@ -573,7 +573,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
 	job := mock.Job()

 	node := mock.Node()
-	noErr(t, state.UpsertNode(1000, node))
+	noErr(t, state.UpsertNode(900, node))

 	// Register an alloc
 	alloc := &structs.Allocation{
@@ -587,8 +587,10 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
 			MemoryMB: 2048,
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		TaskGroup:     "web",
 	}
 	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
+	noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
 	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

 	// Create a new task group that prevents in-place updates.
@@ -619,7 +621,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
 	job := mock.Job()

 	node := mock.Node()
-	noErr(t, state.UpsertNode(1000, node))
+	noErr(t, state.UpsertNode(900, node))

 	// Register an alloc
 	alloc := &structs.Allocation{
@@ -633,8 +635,10 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
 			MemoryMB: 2048,
 		},
 		DesiredStatus: structs.AllocDesiredStatusRun,
+		TaskGroup:     "web",
 	}
 	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
+	noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
 	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

 	// Create a new task group that requires too much resources.
@@ -664,7 +668,7 @@ func TestInplaceUpdate_Success(t *testing.T) {
 	job := mock.Job()

 	node := mock.Node()
-	noErr(t, state.UpsertNode(1000, node))
+	noErr(t, state.UpsertNode(900, node))

 	// Register an alloc
 	alloc := &structs.Allocation{
@@ -681,6 +685,7 @@ func TestInplaceUpdate_Success(t *testing.T) {
 		DesiredStatus: structs.AllocDesiredStatusRun,
 	}
 	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
+	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
 	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

 	// Create a new task group that updates the resources.