package systemsched

import (
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)
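
// SystemSchedTest is the e2e test case for the system scheduler. The
// embedded framework.TC provides the Nomad API client via tc.Nomad();
// jobIDs records every job registered during a test so AfterEach can
// deregister it.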
type SystemSchedTest struct {
	framework.TC
	jobIDs []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "SystemScheduler",
		CanRunLocal: true,
		Cases: []framework.TestCase{
			new(SystemSchedTest),
		},
	})
}
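
// BeforeAll and AfterEach are e2e framework hooks: BeforeAll runs once
// before the case's Test* methods, and AfterEach runs after each test.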
func (tc *SystemSchedTest) BeforeAll(f *framework.F) {
	// Ensure the cluster has a leader and at least four ready client
	// nodes before running tests
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 4)
}
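
// TestJobUpdateOnIneligibleNode registers a system job, marks one node
// that is running an allocation as ineligible, then updates the job and
// asserts that the ineligible node keeps its version 0 allocation while
// running allocations on all other nodes are at version 1.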
func (tc *SystemSchedTest) TestJobUpdateOnIneligibleNode(f *framework.F) {
	t := f.T()
	nomadClient := tc.Nomad()

	jobID := "system_deployment"
	tc.jobIDs = append(tc.jobIDs, jobID)
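
	// RegisterAndWaitForAllocs registers the job file and blocks until
	// the scheduler has created allocations for it.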
	e2eutil.RegisterAndWaitForAllocs(t, nomadClient, "systemsched/input/system_job0.nomad", jobID, "")

	jobs := nomadClient.Jobs()
	allocs, _, err := jobs.Allocations(jobID, true, nil)
	require.NoError(t, err)

	var allocIDs []string
	for _, alloc := range allocs {
		allocIDs = append(allocIDs, alloc.ID)
	}

	// Wait for allocations to get past initial pending state
	e2eutil.WaitForAllocsNotPending(t, nomadClient, allocIDs)

	// Mark one node as ineligible
	nodesAPI := tc.Nomad().Nodes()
	disabledNodeID := allocs[0].NodeID
	_, err = nodesAPI.ToggleEligibility(disabledNodeID, false, nil)
	require.NoError(t, err)
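
	// An ineligible node receives no new placements, but its existing
	// allocations keep running, so the original allocs should survive
	// the eligibility change.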

	// Assert all allocs are still running
	jobs = nomadClient.Jobs()
	allocs, _, err = jobs.Allocations(jobID, true, nil)
	require.NoError(t, err)

	allocIDs = nil
	for _, alloc := range allocs {
		allocIDs = append(allocIDs, alloc.ID)
	}

	allocForDisabledNode := make(map[string]*api.AllocationListStub)

	// Wait for allocs to run and collect the allocs on the ineligible
	// node. An allocation could have failed, so ensure there is one
	// that's running and that it is the correct version (0)
	e2eutil.WaitForAllocsNotPending(t, nomadClient, allocIDs)
	for _, alloc := range allocs {
		if alloc.NodeID == disabledNodeID {
			allocForDisabledNode[alloc.ID] = alloc
		}
	}

	// Filter down to only our latest running alloc
	for _, alloc := range allocForDisabledNode {
		require.Equal(t, uint64(0), alloc.JobVersion)
		if alloc.ClientStatus == structs.AllocClientStatusComplete {
			// remove the old complete alloc from map
			delete(allocForDisabledNode, alloc.ID)
		}
	}
	require.NotEmpty(t, allocForDisabledNode)
	require.Len(t, allocForDisabledNode, 1)
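
	// The system scheduler places one instance of the job's task group
	// on each eligible node, which is why exactly one running version 0
	// alloc is expected on the now-ineligible node.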

	// Update job
	e2eutil.RegisterAndWaitForAllocs(t, nomadClient, "systemsched/input/system_job1.nomad", jobID, "")

	// Get updated allocations
	jobs = nomadClient.Jobs()
	allocs, _, err = jobs.Allocations(jobID, false, nil)
	require.NoError(t, err)
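
	// Because the disabled node is ineligible, the scheduler should not
	// place a version 1 alloc there; its version 0 alloc stays in place.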

	allocIDs = nil
	for _, alloc := range allocs {
		allocIDs = append(allocIDs, alloc.ID)
	}

	// Wait for allocs to start
	e2eutil.WaitForAllocsNotPending(t, nomadClient, allocIDs)

	// Get latest alloc status now that they are no longer pending
	allocs, _, err = jobs.Allocations(jobID, false, nil)
	require.NoError(t, err)

	var foundPreviousAlloc bool
	for _, dAlloc := range allocForDisabledNode {
		for _, alloc := range allocs {
			if alloc.ID == dAlloc.ID {
				foundPreviousAlloc = true
				require.Equal(t, uint64(0), alloc.JobVersion)
			} else if alloc.ClientStatus == structs.AllocClientStatusRunning {
				// Ensure allocs running on non-disabled nodes are the
				// newer version
				require.Equal(t, uint64(1), alloc.JobVersion)
			}
		}
	}
	require.True(t, foundPreviousAlloc, "unable to find previous alloc for ineligible node")
}

func (tc *SystemSchedTest) AfterEach(f *framework.F) {
	nomadClient := tc.Nomad()
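
	// Cleanup is best-effort: errors from the API calls below are
	// deliberately ignored so a cleanup failure doesn't mask results.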

	// Mark all nodes eligible
	nodesAPI := tc.Nomad().Nodes()
	nodes, _, _ := nodesAPI.List(nil)
	for _, node := range nodes {
		nodesAPI.ToggleEligibility(node.ID, true, nil)
	}

	jobs := nomadClient.Jobs()
	// Stop all jobs in test
	for _, id := range tc.jobIDs {
		jobs.Deregister(id, true, nil)
	}
	tc.jobIDs = []string{}

	// Garbage collect
	nomadClient.System().GarbageCollect()
}