open-nomad/scheduler/context_test.go


// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package scheduler

import (
"testing"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/testlog"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/stretchr/testify/require"
)
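
// testContext builds an in-memory state store and an EvalContext backed by an
// empty plan for the tests in this file.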
func testContext(t testing.TB) (*state.StateStore, *EvalContext) {
state := state.TestStateStore(t)
plan := &structs.Plan{
EvalID: uuid.Generate(),
NodeUpdate: make(map[string][]*structs.Allocation),
NodeAllocation: make(map[string][]*structs.Allocation),
NodePreemptions: make(map[string][]*structs.Allocation),
}
logger := testlog.HCLogger(t)
ctx := NewEvalContext(nil, state, plan, logger)
return state, ctx
}
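
// TestEvalContext_ProposedAlloc verifies that ProposedAllocs accounts for the
// in-flight plan: allocs planned for eviction are removed from a node's
// proposed set and planned placements are added to it.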
func TestEvalContext_ProposedAlloc(t *testing.T) {
ci.Parallel(t)
state, ctx := testContext(t)
nodes := []*RankedNode{
{
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
CpuShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},
{
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
CpuShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},
}
// Add existing allocations
j1, j2 := mock.Job(), mock.Job()
alloc1 := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
alloc2 := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[1].Node.ID,
JobID: j2.ID,
Job: j2,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc1, alloc2}))
// Add a planned eviction of alloc1
plan := ctx.Plan()
plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{alloc1}
// Add a planned placement to node1
plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
{
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
},
}
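// alloc1 is planned for eviction, so node 0 should propose nothing; node 1
// should propose alloc2 plus the planned placement above.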
proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(proposed) != 0 {
t.Fatalf("bad: %#v", proposed)
}
proposed, err = ctx.ProposedAllocs(nodes[1].Node.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
if len(proposed) != 2 {
t.Fatalf("bad: %#v", proposed)
}
}
// TestEvalContext_ProposedAlloc_EvictPreempt asserts both Evicted and
// Preempted allocs are removed from the allocs proposed for a node.
//
// See https://github.com/hashicorp/nomad/issues/6787
func TestEvalContext_ProposedAlloc_EvictPreempt(t *testing.T) {
ci.Parallel(t)
state, ctx := testContext(t)
nodes := []*RankedNode{
{
Node: &structs.Node{
ID: uuid.Generate(),
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
CpuShares: 1024 * 3,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 1024 * 3,
},
},
},
},
}
// Add existing allocations
j1, j2, j3 := mock.Job(), mock.Job(), mock.Job()
allocEvict := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
allocPreempt := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j2.ID,
Job: j2,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
allocPropose := &structs.Allocation{
ID: uuid.Generate(),
Namespace: structs.DefaultNamespace,
EvalID: uuid.Generate(),
NodeID: nodes[0].Node.ID,
JobID: j3.ID,
Job: j3,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
TaskGroup: "web",
}
require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(allocEvict.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPreempt.JobID)))
require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(allocPropose.JobID)))
require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{allocEvict, allocPreempt, allocPropose}))
// Plan to evict one alloc and preempt another
plan := ctx.Plan()
plan.NodePreemptions[nodes[0].Node.ID] = []*structs.Allocation{allocEvict}
plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{allocPreempt}
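// Both the alloc planned for eviction and the alloc planned for preemption
// should be filtered out, leaving only allocPropose.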
proposed, err := ctx.ProposedAllocs(nodes[0].Node.ID)
require.NoError(t, err)
require.Len(t, proposed, 1)
}
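
// TestEvalEligibility_JobStatus checks that a computed class is reported as
// unknown before any job-level eligibility is recorded, and as ineligible or
// eligible afterwards.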
func TestEvalEligibility_JobStatus(t *testing.T) {
ci.Parallel(t)
e := NewEvalEligibility()
cc := "v1:100"
// Get the job's status before it has been set.
if status := e.JobStatus(cc); status != EvalComputedClassUnknown {
t.Fatalf("JobStatus() returned %v; want %v", status, EvalComputedClassUnknown)
}
// Set the job and get its status.
e.SetJobEligibility(false, cc)
if status := e.JobStatus(cc); status != EvalComputedClassIneligible {
t.Fatalf("JobStatus() returned %v; want %v", status, EvalComputedClassIneligible)
}
e.SetJobEligibility(true, cc)
if status := e.JobStatus(cc); status != EvalComputedClassEligible {
t.Fatalf("JobStatus() returned %v; want %v", status, EvalComputedClassEligible)
}
}
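
// TestEvalEligibility_TaskGroupStatus performs the same check at the task
// group level.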
func TestEvalEligibility_TaskGroupStatus(t *testing.T) {
ci.Parallel(t)
e := NewEvalEligibility()
cc := "v1:100"
tg := "foo"
// Get the task group's status before it has been set.
if status := e.TaskGroupStatus(tg, cc); status != EvalComputedClassUnknown {
t.Fatalf("TaskGroupStatus() returned %v; want %v", status, EvalComputedClassUnknown)
}
// Set the tg and get its status.
e.SetTaskGroupEligibility(false, tg, cc)
if status := e.TaskGroupStatus(tg, cc); status != EvalComputedClassIneligible {
t.Fatalf("TaskGroupStatus() returned %v; want %v", status, EvalComputedClassIneligible)
}
e.SetTaskGroupEligibility(true, tg, cc)
if status := e.TaskGroupStatus(tg, cc); status != EvalComputedClassEligible {
t.Fatalf("TaskGroupStatus() returned %v; want %v", status, EvalComputedClassEligible)
}
}
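
// TestEvalEligibility_SetJob verifies that a job whose constraints reference
// unique node attributes or metadata is marked as escaping the computed node
// class, along with any task group carrying such constraints.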
func TestEvalEligibility_SetJob(t *testing.T) {
ci.Parallel(t)
e := NewEvalEligibility()
ne1 := &structs.Constraint{
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
}
e1 := &structs.Constraint{
LTarget: "${attr.unique.kernel.name}",
RTarget: "linux",
Operand: "=",
}
e2 := &structs.Constraint{
LTarget: "${meta.unique.key_foo}",
RTarget: "linux",
Operand: "<",
}
e3 := &structs.Constraint{
LTarget: "${meta.unique.key_foo}",
RTarget: "Windows",
Operand: "<",
}
job := mock.Job()
jobCon := []*structs.Constraint{ne1, e1, e2}
job.Constraints = jobCon
// Set the task group and task constraints
tg := job.TaskGroups[0]
tg.Constraints = []*structs.Constraint{e1}
tg.Tasks[0].Constraints = []*structs.Constraint{e3}
e.SetJob(job)
if !e.HasEscaped() {
t.Fatalf("HasEscaped() should be true")
}
if !e.jobEscaped {
t.Fatalf("SetJob() should mark job as escaped")
}
if escaped, ok := e.tgEscapedConstraints[tg.Name]; !ok || !escaped {
t.Fatalf("SetJob() should mark task group as escaped")
}
}
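
// TestEvalEligibility_GetClasses checks how job-level and task-group-level
// eligibility results are merged into the final per-class map.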
func TestEvalEligibility_GetClasses(t *testing.T) {
ci.Parallel(t)
e := NewEvalEligibility()
e.SetJobEligibility(true, "v1:1")
e.SetJobEligibility(false, "v1:2")
e.SetTaskGroupEligibility(true, "foo", "v1:3")
e.SetTaskGroupEligibility(false, "bar", "v1:4")
e.SetTaskGroupEligibility(true, "bar", "v1:5")
// Mark existing classes as ineligible at the task group level.
e.SetTaskGroupEligibility(false, "fizz", "v1:1")
e.SetTaskGroupEligibility(false, "fizz", "v1:3")
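// Expected: a class kept eligible by any task group stays eligible, while a
// task group marking a class ineligible overrides job-level eligibility.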
expClasses := map[string]bool{
"v1:1": false,
"v1:2": false,
"v1:3": true,
"v1:4": false,
"v1:5": true,
}
actClasses := e.GetClasses()
require.Equal(t, expClasses, actClasses)
}
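
// TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible covers
// classes that are eligible at the job level but ineligible for some or all
// task groups.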
func TestEvalEligibility_GetClasses_JobEligible_TaskGroupIneligible(t *testing.T) {
ci.Parallel(t)
e := NewEvalEligibility()
e.SetJobEligibility(true, "v1:1")
e.SetTaskGroupEligibility(false, "foo", "v1:1")
e.SetJobEligibility(true, "v1:2")
e.SetTaskGroupEligibility(false, "foo", "v1:2")
e.SetTaskGroupEligibility(true, "bar", "v1:2")
e.SetJobEligibility(true, "v1:3")
e.SetTaskGroupEligibility(false, "foo", "v1:3")
e.SetTaskGroupEligibility(false, "bar", "v1:3")
expClasses := map[string]bool{
"v1:1": false,
"v1:2": true,
"v1:3": false,
}
actClasses := e.GetClasses()
require.Equal(t, expClasses, actClasses)
}
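
// TestPortCollisionEvent_Copy checks that Copy produces a deep copy whose
// fields can be mutated without affecting the original event.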
func TestPortCollisionEvent_Copy(t *testing.T) {
ci.Parallel(t)
ev := &PortCollisionEvent{
Reason: "original",
Node: mock.Node(),
Allocations: []*structs.Allocation{
mock.Alloc(),
mock.Alloc(),
},
NetIndex: structs.NewNetworkIndex(),
}
ev.NetIndex.SetNode(ev.Node)
// Copy must be equal
evCopy := ev.Copy()
require.Equal(t, ev, evCopy)
// Modifying the copy should not affect the original value
evCopy.Reason = "copy"
require.NotEqual(t, ev.Reason, evCopy.Reason)
evCopy.Node.Attributes["test"] = "true"
require.NotEqual(t, ev.Node, evCopy.Node)
evCopy.Allocations = append(evCopy.Allocations, mock.Alloc())
require.NotEqual(t, ev.Allocations, evCopy.Allocations)
evCopy.NetIndex.AddAllocs(evCopy.Allocations)
require.NotEqual(t, ev.NetIndex, evCopy.NetIndex)
}
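
// TestPortCollisionEvent_Sanitize checks that Sanitize strips sensitive data:
// the node's SecretID and the allocations' embedded job definitions.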
func TestPortCollisionEvent_Sanitize(t *testing.T) {
ci.Parallel(t)
ev := &PortCollisionEvent{
Reason: "original",
Node: mock.Node(),
Allocations: []*structs.Allocation{
mock.Alloc(),
},
NetIndex: structs.NewNetworkIndex(),
}
cleanEv := ev.Sanitize()
require.Empty(t, cleanEv.Node.SecretID)
require.Nil(t, cleanEv.Allocations[0].Job)
}