package scheduler

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// noErr is used to assert there are no errors
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

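// TestMaterializeTaskGroups checks that materializeTaskGroups expands the job
// into one entry per desired instance, keyed as "<job>.<group>[<index>]".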
func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

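// TestDiffAllocs checks that diffAllocs buckets existing allocations into
// place, update, migrate, stop, and ignore sets based on the required task
// groups and the tainted nodes.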
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"zip":  false,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict the 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[2]",
			Job:    oldJob,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 7
	if len(place) != 7 {
		t.Fatalf("bad: %#v", place)
	}
}

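// TestDiffSystemAllocs checks the same bucketing for a system job, which
// expects one allocation per eligible node.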
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"baz":  false,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on dead.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 1
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}

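// TestReadyNodesInDCs checks that readyNodesInDCs filters out down and
// draining nodes and reports a per-datacenter count of the ready nodes.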
func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

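// TestRetryMax checks that retryMax gives up after the given number of
// attempts and that the reset function can grant another round of attempts.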
func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("mismatch")
	}

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 6 {
		t.Fatalf("mismatch")
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("mismatch")
	}
}

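// TestTaintedNodes checks that taintedNodes marks allocations on down,
// draining, or unknown nodes as tainted and leaves ready nodes untainted.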
func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 5 {
		t.Fatalf("bad: %v", tainted)
	}
	if tainted[node1.ID] || tainted[node2.ID] {
		t.Fatalf("Bad: %v", tainted)
	}
	if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["12345678-abcd-efab-cdef-123456789abc"] {
		t.Fatalf("Bad: %v", tainted)
	}
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}

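// TestTasksUpdated checks that tasksUpdated detects changes to a task's name,
// driver, config, user, environment, artifacts, meta, resources, and ports,
// and reports no change for identical task groups.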
func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()

	if tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	if !tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j3.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j4.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	if !tasksUpdated(j1.TaskGroups[0], j5.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
	if !tasksUpdated(j1.TaskGroups[0], j6.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	if !tasksUpdated(j1.TaskGroups[0], j7.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j8.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	if !tasksUpdated(j1.TaskGroups[0], j9.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	if !tasksUpdated(j1.TaskGroups[0], j10.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	if !tasksUpdated(j1.TaskGroups[0], j11.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j12 := mock.Job()
	j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100
	if !tasksUpdated(j1.TaskGroups[0], j12.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j13 := mock.Job()
	j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar"
	if !tasksUpdated(j1.TaskGroups[0], j13.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j14 := mock.Job()
	j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	if !tasksUpdated(j1.TaskGroups[0], j14.TaskGroups[0]) {
		t.Fatalf("bad")
	}
}

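// TestEvictAndPlace_LimitLessThanAllocs checks that evictAndPlace stops once
// the update limit is exhausted and reports that the limit was reached.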
func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

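// TestSetStatus checks that setStatus submits an evaluation with the given
// status and description, and carries the optional next eval, blocked eval,
// and failed task group metrics when provided.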
func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	if err := setStatus(logger, h, eval, nil, nil, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, nil, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	if err := setStatus(logger, h, eval, nil, blocked, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.BlockedEval != blocked.ID {
		t.Fatalf("setStatus() didn't set BlockedEval correctly: %v", newEval)
	}

	// Test metrics
	h = NewHarness(t)
	metrics := map[string]*structs.AllocMetric{"foo": nil}
	if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if !reflect.DeepEqual(newEval.FailedTGAllocs, metrics) {
		t.Fatalf("setStatus() didn't set failed task group metrics correctly: %v", newEval)
	}
}

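// TestInplaceUpdate_ChangedTaskGroup checks that inplaceUpdate does not update
// in place when the task group's tasks have changed.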
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{Name: "FOO"}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

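// TestInplaceUpdate_NoMatch checks that inplaceUpdate does not update in place
// when the updated task group asks for more resources than the node can offer.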
func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

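// TestInplaceUpdate_Success checks that a compatible resource and service
// change is applied in place and that the updated allocation carries the
// modified task group.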
func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:        structs.GenerateUUID(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 || len(inplace) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if inplace[0].Alloc.ID != alloc.ID {
		t.Fatalf("inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)
	}

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	if a.Job == nil {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks[0].Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))
	}

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	if len(serviceNames) != 3 {
		t.Fatalf("bad")
	}

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected consul service name missing: %v", name)
		}
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

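// TestTaskGroupConstraints checks that taskGroupConstraints merges group and
// task level constraints, collects the required drivers, and sums the task
// resources.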
func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:        "web",
		Count:       10,
		Constraints: []*structs.Constraint{constr},
		Tasks: []*structs.Task{
			&structs.Task{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			&structs.Task{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

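// TestProgressMade checks that progressMade reports progress only for plan
// results that include node allocations or node updates.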
func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	if progressMade(nil) || progressMade(noopPlan) {
		t.Fatal("no progress plan marked as making progress")
	}

	m := map[string][]*structs.Allocation{
		"foo": []*structs.Allocation{mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	if !(progressMade(both) && progressMade(update) && progressMade(alloc)) {
		t.Fatal("bad")
	}
}

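// TestDesiredUpdates checks that desiredUpdates tallies per task group counts
// of placements, stops, migrations, ignores, in-place updates, and destructive
// updates.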
func TestDesiredUpdates(t *testing.T) {
	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
	}
	stop := []allocTuple{
		allocTuple{TaskGroup: tg2, Alloc: a2},
		allocTuple{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		allocTuple{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		allocTuple{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
		allocTuple{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	if !reflect.DeepEqual(desired, expected) {
		t.Fatalf("desiredUpdates() returned %#v; want %#v", desired, expected)
	}
}