package scheduler

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
	"github.com/stretchr/testify/require"
)

func TestStaticIterator_Reset(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	for i := 0; i < 6; i++ {
		static.Reset()
		for j := 0; j < i; j++ {
			static.Next()
		}
		static.Reset()

		out := collectFeasible(static)
		if len(out) != len(nodes) {
			t.Fatalf("missing nodes on iteration %d: out: %#v, iterator: %#v", i, out, static)
		}

		ids := make(map[string]struct{})
		for _, o := range out {
			if _, ok := ids[o.ID]; ok {
				t.Fatalf("duplicate node %q", o.ID)
			}
			ids[o.ID] = struct{}{}
		}
	}
}

func TestStaticIterator_SetNodes(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	newNodes := []*structs.Node{mock.Node()}
	static.SetNodes(newNodes)

	out := collectFeasible(static)
	if !reflect.DeepEqual(out, newNodes) {
		t.Fatalf("bad: %#v", out)
	}
}

func TestRandomIterator(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		nodes = append(nodes, mock.Node())
	}

	nc := make([]*structs.Node, len(nodes))
	copy(nc, nodes)
	rand := NewRandomIterator(ctx, nc)

	out := collectFeasible(rand)
	if len(out) != len(nodes) {
		t.Fatalf("missing nodes")
	}
	if reflect.DeepEqual(out, nodes) {
		t.Fatalf("same order")
	}
}

func TestDriverChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	nodes[0].Attributes["driver.foo"] = "1"
	nodes[1].Attributes["driver.foo"] = "0"
	nodes[2].Attributes["driver.foo"] = "true"
	nodes[3].Attributes["driver.foo"] = "False"

	drivers := map[string]struct{}{
		"exec": {},
		"foo":  {},
	}
	checker := NewDriverChecker(ctx, drivers)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: true,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
		{
			Node:   nodes[3],
			Result: false,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

func Test_HealthChecks(t *testing.T) {
	require := require.New(t)
	_, ctx := testContext(t)

	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	for _, e := range nodes {
		e.Drivers = make(map[string]*structs.DriverInfo)
	}
	nodes[0].Attributes["driver.foo"] = "1"
	nodes[0].Drivers["foo"] = &structs.DriverInfo{
		Detected:          true,
		Healthy:           true,
		HealthDescription: "running",
		UpdateTime:        time.Now(),
	}
	nodes[1].Attributes["driver.bar"] = "1"
	nodes[1].Drivers["bar"] = &structs.DriverInfo{
		Detected:          true,
		Healthy:           false,
		HealthDescription: "not running",
		UpdateTime:        time.Now(),
	}
	nodes[2].Attributes["driver.baz"] = "0"
	nodes[2].Drivers["baz"] = &structs.DriverInfo{
		Detected:          false,
		Healthy:           false,
		HealthDescription: "not running",
		UpdateTime:        time.Now(),
	}

	testDrivers := []string{"foo", "bar", "baz"}
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: true,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: false,
		},
	}

	for i, c := range cases {
		drivers := map[string]struct{}{
			testDrivers[i]: {},
		}
		checker := NewDriverChecker(ctx, drivers)
		act := checker.Feasible(c.Node)
		require.Equal(c.Result, act)
	}
}
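
// The cases above suggest DriverChecker's health gating reduces to requiring
// that a driver is both detected and healthy. This is a minimal illustrative
// sketch of that predicate, not the checker's actual implementation;
// driverInfoUsable is a hypothetical helper name.
func driverInfoUsable(di *structs.DriverInfo) bool {
	// A nil entry means the fingerprinter never reported on this driver.
	if di == nil {
		return false
	}
	// Usable only when the driver is present and passing its health checks.
	return di.Detected && di.Healthy
}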

func TestConstraintChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	nodes[0].Attributes["kernel.name"] = "freebsd"
	nodes[1].Datacenter = "dc2"
	nodes[2].NodeClass = "large"
	nodes[2].Attributes["foo"] = "bar"

	constraints := []*structs.Constraint{
		{
			Operand: "=",
			LTarget: "${node.datacenter}",
			RTarget: "dc1",
		},
		{
			Operand: "is",
			LTarget: "${attr.kernel.name}",
			RTarget: "linux",
		},
		{
			Operand: "!=",
			LTarget: "${node.class}",
			RTarget: "linux-medium-pci",
		},
		{
			Operand: "is_set",
			LTarget: "${attr.foo}",
		},
	}
	checker := NewConstraintChecker(ctx, constraints)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: false,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

func TestResolveConstraintTarget(t *testing.T) {
	type tcase struct {
		target string
		node   *structs.Node
		val    interface{}
		result bool
	}
	node := mock.Node()
	cases := []tcase{
		{
			target: "${node.unique.id}",
			node:   node,
			val:    node.ID,
			result: true,
		},
		{
			target: "${node.datacenter}",
			node:   node,
			val:    node.Datacenter,
			result: true,
		},
		{
			target: "${node.unique.name}",
			node:   node,
			val:    node.Name,
			result: true,
		},
		{
			target: "${node.class}",
			node:   node,
			val:    node.NodeClass,
			result: true,
		},
		{
			target: "${node.foo}",
			node:   node,
			result: false,
		},
		{
			target: "${attr.kernel.name}",
			node:   node,
			val:    node.Attributes["kernel.name"],
			result: true,
		},
		{
			target: "${attr.rand}",
			node:   node,
			val:    "",
			result: false,
		},
		{
			target: "${meta.pci-dss}",
			node:   node,
			val:    node.Meta["pci-dss"],
			result: true,
		},
		{
			target: "${meta.rand}",
			node:   node,
			val:    "",
			result: false,
		},
	}

	for _, tc := range cases {
		res, ok := resolveTarget(tc.target, tc.node)
		if ok != tc.result {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
		if ok && !reflect.DeepEqual(res, tc.val) {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
	}
}
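
// resolveTarget treats unset attributes and metadata as missing rather than
// matching the empty string, as the failing ${attr.rand} and ${meta.rand}
// cases above show. A small supplementary check under that assumption; the
// "team" meta key is hypothetical and used only for illustration.
func TestResolveConstraintTarget_SetMeta(t *testing.T) {
	node := mock.Node()
	node.Meta["team"] = "platform"
	if val, ok := resolveTarget("${meta.team}", node); !ok || val != "platform" {
		t.Fatalf("bad: %v %v", val, ok)
	}
}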

func TestCheckConstraint(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:     "=",
			lVal:   "foo", rVal: "foo",
			result: true,
		},
		{
			op:     "is",
			lVal:   "foo", rVal: "foo",
			result: true,
		},
		{
			op:     "==",
			lVal:   "foo", rVal: "foo",
			result: true,
		},
		{
			op:     "==",
			lVal:   "foo", rVal: nil,
			result: false,
		},
		{
			op:     "==",
			lVal:   nil, rVal: "foo",
			result: false,
		},
		{
			op:     "==",
			lVal:   nil, rVal: nil,
			result: false,
		},
		{
			op:     "!=",
			lVal:   "foo", rVal: "foo",
			result: false,
		},
		{
			op:     "!=",
			lVal:   "foo", rVal: "bar",
			result: true,
		},
		{
			op:     "!=",
			lVal:   nil, rVal: "foo",
			result: true,
		},
		{
			op:     "!=",
			lVal:   "foo", rVal: nil,
			result: true,
		},
		{
			op:     "!=",
			lVal:   nil, rVal: nil,
			result: false,
		},
		{
			op:     "not",
			lVal:   "foo", rVal: "bar",
			result: true,
		},
		{
			op:     structs.ConstraintVersion,
			lVal:   "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			op:     structs.ConstraintVersion,
			lVal:   nil, rVal: "~> 1.0",
			result: false,
		},
		{
			op:     structs.ConstraintRegex,
			lVal:   "foobarbaz", rVal: "[\\w]+",
			result: true,
		},
		{
			op:     structs.ConstraintRegex,
			lVal:   nil, rVal: "[\\w]+",
			result: false,
		},
		{
			op:     "<",
			lVal:   "foo", rVal: "bar",
			result: false,
		},
		{
			op:     "<",
			lVal:   nil, rVal: "bar",
			result: false,
		},
		{
			op:     structs.ConstraintSetContains,
			lVal:   "foo,bar,baz", rVal: "foo, bar ",
			result: true,
		},
		{
			op:     structs.ConstraintSetContains,
			lVal:   "foo,bar,baz", rVal: "foo,bam",
			result: false,
		},
		{
			op:     structs.ConstraintAttributeIsSet,
			lVal:   "foo",
			result: true,
		},
		{
			op:     structs.ConstraintAttributeIsSet,
			lVal:   nil,
			result: false,
		},
		{
			op:     structs.ConstraintAttributeIsNotSet,
			lVal:   nil,
			result: true,
		},
		{
			op:     structs.ConstraintAttributeIsNotSet,
			lVal:   "foo",
			result: false,
		},
	}

	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkConstraint(ctx, tc.op, tc.lVal, tc.rVal, tc.lVal != nil, tc.rVal != nil); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}
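
// The set_contains cases above imply the right-hand operand is parsed as a
// comma-separated set whose elements are whitespace-trimmed, each of which
// must appear in the left-hand set. A small supplementary case under that
// reading:
func TestCheckConstraint_SetContainsTrimming(t *testing.T) {
	_, ctx := testContext(t)
	// " baz ,foo" should reduce to {baz, foo}, both present on the left.
	if !checkConstraint(ctx, structs.ConstraintSetContains, "foo,bar,baz", " baz ,foo", true, true) {
		t.Fatalf("expected trimmed set elements to match")
	}
}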

func TestCheckLexicalOrder(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:     "<",
			lVal:   "bar", rVal: "foo",
			result: true,
		},
		{
			op:     "<=",
			lVal:   "foo", rVal: "foo",
			result: true,
		},
		{
			op:     ">",
			lVal:   "bar", rVal: "foo",
			result: false,
		},
		{
			op:     ">=",
			lVal:   "bar", rVal: "bar",
			result: true,
		},
		{
			op:     ">",
			lVal:   1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		if res := checkLexicalOrder(tc.op, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}
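
// Lexical ordering here is Go's byte-wise string comparison, which can
// surprise with numeric-looking values: "10" sorts before "9". A small
// supplementary case illustrating that byte-order quirk:
func TestCheckLexicalOrder_ByteOrder(t *testing.T) {
	// '1' (0x31) compares below '9' (0x39), so "10" < "9" lexically.
	if !checkLexicalOrder("<", "10", "9") {
		t.Fatalf(`expected "10" < "9" under lexical ordering`)
	}
}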

func TestCheckVersionConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			lVal: "1.2.3", rVal: ">= 1.0, < 1.4",
			result: true,
		},
		{
			lVal: "2.0.1", rVal: "~> 1.0",
			result: false,
		},
		{
			lVal: "1.4", rVal: ">= 1.0, < 1.4",
			result: false,
		},
		{
			lVal: 1, rVal: "~> 1.0",
			result: true,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkVersionMatch(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}
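
// The pessimistic operator "~> 1.0" above accepts any 1.x release and rejects
// 2.x, per hashicorp/go-version semantics; a narrower bound like "~> 1.2"
// should likewise cap below 2.0. A small supplementary case under that
// assumption:
func TestCheckVersionConstraint_Pessimistic(t *testing.T) {
	_, ctx := testContext(t)
	if !checkVersionMatch(ctx, "1.9.9", "~> 1.2") {
		t.Fatalf("expected 1.9.9 to satisfy ~> 1.2")
	}
	if checkVersionMatch(ctx, "2.0.0", "~> 1.2") {
		t.Fatalf("expected 2.0.0 to violate ~> 1.2")
	}
}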

func TestCheckRegexpConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "foobar", rVal: "bar",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^foo",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^bar",
			result: false,
		},
		{
			lVal: "zipzap", rVal: "foo",
			result: false,
		},
		{
			lVal: 1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkRegexpMatch(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}
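
// Matching above follows Go's RE2 semantics: an unanchored pattern matches
// anywhere in the value, which is why "foobar" satisfies "bar". A small
// supplementary case showing that anchors are needed to force a full match:
func TestCheckRegexpConstraint_Anchoring(t *testing.T) {
	_, ctx := testContext(t)
	if !checkRegexpMatch(ctx, "foobar", "bar$") {
		t.Fatalf("expected suffix-anchored pattern to match")
	}
	if checkRegexpMatch(ctx, "foobar", "^bar$") {
		t.Fatalf("expected fully anchored pattern not to match")
	}
}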

// This test puts allocations on nodes to check that infeasible nodes are
// detected correctly and the only feasible one is picked.
func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable on all nodes but node3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(job)

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}

func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg3)
	proposed.SetJob(job)

	// It should not be able to place the third task group with only two nodes.
	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a task group with a distinct_hosts constraint.
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{Operand: structs.ConstraintDistinctHosts},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	// Add a planned alloc to node1.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "foo",
		},
	}

	// Add a planned alloc to node2 with the same task group name but a
	// different job.
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "bar",
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(&structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
	})

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	// Expect it to skip the first node as there is a previous alloc on it for
	// the same task group.
	if out[0] != nodes[1] {
		t.Fatalf("Bad: %v", out)
	}

	// Since the other task group doesn't have the constraint, both nodes should
	// be feasible.
	proposed.Reset()
	proposed.SetTaskGroup(tg2)
	out = collectFeasible(proposed)
	if len(out) != 2 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates allocations across task groups that use a property value
// to check that a constraint at the job level properly considers all task
// groups.
func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and 2 and tg2 on node3 and 4. This
	// should make the job unsatisfiable on all nodes but node5. Also mix the
	// allocations existing in the plan and the state store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on Node 5 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[4].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[4].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[4].ID {
		t.Fatalf("wrong node picked")
	}
}

// This test creates allocations across task groups that use a property value
// to check that a job-level constraint properly considers all task groups
// when the constraint allows a count greater than one.
func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing two allocations on both node 1 and node 2 and only
	// one on node 3. This should make the job unsatisfiable on all nodes but
	// node 3. Also mix the allocations existing in the plan and the state
	// store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on Node 3 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}
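
// The bookkeeping the two tests above exercise amounts to counting, per
// property value, the job's live allocations (planned plus stored, minus ones
// being stopped) and rejecting nodes whose value has reached the allowed
// count (1 by default, or the constraint's RTarget). This is a minimal
// illustrative sketch of that counting rule, not the iterator's actual
// implementation; propertyValueFeasible is a hypothetical name.
func propertyValueFeasible(used map[string]uint64, value string, allowed uint64) bool {
	// The node's property value stays feasible while fewer than `allowed`
	// allocations already use it.
	return used[value] < allowed
}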

// This test checks that when a node has a stopped allocation that the plan is
// replacing with a new allocation, the next selection will not pick that node.
func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}

	nodes[0].Meta["rack"] = "1"

	// Add to state store
	if err := state.UpsertNode(uint64(100), nodes[0]); err != nil {
		t.Fatalf("failed to upsert node: %v", err)
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and a task group.
	tg1 := &structs.TaskGroup{Name: "bar"}
	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1},
	}

	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[0].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly and that no
// node is returned when every value is already taken.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly when the
// constraint allows a count greater than one.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs for tg1 and tg2 on both nodes, filling the count of 2 for
	// each rack value. This should make the job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly and the only
// feasible node is picked when the constraint is at the task group level.
func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a task group with the distinct_property constraint
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		Namespace:  structs.DefaultNamespace,
		ID:         "foo",
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and 2. This should make the job
	// unsatisfiable on all nodes but node3. Also mix the allocations existing
	// in the plan and the state store.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	// Put an allocation on Node 3 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}

	// Since the other task group doesn't have the constraint, all three nodes
	// should be feasible.
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out = collectFeasible(proposed)
	if len(out) != 3 {
		t.Fatalf("Bad: %#v", out)
	}
}

func collectFeasible(iter FeasibleIterator) (out []*structs.Node) {
	for {
		next := iter.Next()
		if next == nil {
			break
		}
		out = append(out, next)
	}
	return
}

// mockFeasibilityChecker is a FeasibilityChecker that returns predetermined
// feasibility values.
type mockFeasibilityChecker struct {
	retVals []bool
	i       int
}

func newMockFeasibilityChecker(values ...bool) *mockFeasibilityChecker {
	return &mockFeasibilityChecker{retVals: values}
}

func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool {
	if c.i >= len(c.retVals) {
		c.i++
		return false
	}

	f := c.retVals[c.i]
	c.i++
	return f
}

// calls returns how many times the checker was called.
func (c *mockFeasibilityChecker) calls() int { return c.i }

func TestFeasibilityWrapper_JobIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to ineligible
	ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass)

	// Run the wrapper.
	out := collectFeasible(wrapper)
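
	// The computed class is cached as ineligible, so the wrapper filters the
	// node without ever consulting the checker.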
	if out != nil || mocked.calls() != 0 {
		t.Fatalf("bad: %#v %d", out, mocked.calls())
	}
}

func TestFeasibilityWrapper_JobEscapes(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to escaped
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEscaped

	// Run the wrapper.
	out := collectFeasible(wrapper)
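
	// An escaped computed class can't be answered from the cache, so the
	// checker must run exactly once; it returns false, so no node passes.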
	if out != nil || mocked.calls() != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure that the job status didn't change from escaped even though the
	// option failed.
	if status := ctx.Eligibility().JobStatus(cc); status != EvalComputedClassEscaped {
		t.Fatalf("job status is %v; want %v", status, EvalComputedClassEscaped)
	}
}

func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set the job and task group to eligible
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(true, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)
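
	// Both eligibilities are already cached, so the node passes without the
	// task group checker being invoked.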
	if out == nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set the job to eligible but the task group to ineligible
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(false, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)
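
	// The cached task group ineligibility short-circuits the node, so the
	// checker is never consulted.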
	if out != nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(true)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set the job to eligible and the task group to escaped
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().taskGroups["foo"] =
		map[string]ComputedClassFeasibility{cc: EvalComputedClassEscaped}
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)
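
	// The escaped task group bypasses the cache, so the checker must run
	// exactly once.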
	if out == nil || tgMock.calls() != 1 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}

	if e, ok := ctx.Eligibility().taskGroups["foo"][cc]; !ok || e != EvalComputedClassEscaped {
		t.Fatalf("bad: %v %v", e, ok)
	}
}
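
// checkSetContainsAny treats both operands as comma-separated sets, trimming
// surrounding whitespace from each element, and succeeds when any element of
// the second set appears in the first.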
func TestSetContainsAny(t *testing.T) {
	require.True(t, checkSetContainsAny("a", "a"))
	require.True(t, checkSetContainsAny("a,b", "a"))
	require.True(t, checkSetContainsAny(" a,b ", "a "))
	require.False(t, checkSetContainsAny("b", "a"))
}

func TestDeviceChecker(t *testing.T) {
	getTg := func(devices ...*structs.RequestedDevice) *structs.TaskGroup {
		return &structs.TaskGroup{
			Name: "example",
			Tasks: []*structs.Task{
				{
					Resources: &structs.Resources{
						Devices: devices,
					},
				},
			},
		}
	}

	// Just type
	gpuTypeReq := &structs.RequestedDevice{
		Name:  "gpu",
		Count: 1,
	}
	fpgaTypeReq := &structs.RequestedDevice{
		Name:  "fpga",
		Count: 1,
	}

	// vendor/type
	gpuVendorTypeReq := &structs.RequestedDevice{
		Name:  "nvidia/gpu",
		Count: 1,
	}
	fpgaVendorTypeReq := &structs.RequestedDevice{
		Name:  "nvidia/fpga",
		Count: 1,
	}

	// vendor/type/model
	gpuFullReq := &structs.RequestedDevice{
		Name:  "nvidia/gpu/1080ti",
		Count: 1,
	}
	fpgaFullReq := &structs.RequestedDevice{
		Name:  "nvidia/fpga/F100",
		Count: 1,
	}

	// Just type but high count
	gpuTypeHighCountReq := &structs.RequestedDevice{
		Name:  "gpu",
		Count: 3,
	}
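
	// Device names select progressively more specific targets:
	// "<type>", "<vendor>/<type>", or "<vendor>/<type>/<model>".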

	getNode := func(devices ...*structs.NodeDeviceResource) *structs.Node {
		n := mock.Node()
		n.NodeResources.Devices = devices
		return n
	}

	nvidia := &structs.NodeDeviceResource{
		Vendor: "nvidia",
		Type:   "gpu",
		Name:   "1080ti",
		Attributes: map[string]*psstructs.Attribute{
			"memory":        psstructs.NewIntAttribute(4, psstructs.UnitGiB),
			"pci_bandwidth": psstructs.NewIntAttribute(995, psstructs.UnitMiBPerS),
			"cores_clock":   psstructs.NewIntAttribute(800, psstructs.UnitMHz),
		},
		Instances: []*structs.NodeDevice{
			{
				ID:      uuid.Generate(),
				Healthy: true,
			},
			{
				ID:      uuid.Generate(),
				Healthy: true,
			},
		},
	}

	nvidiaUnhealthy := &structs.NodeDeviceResource{
		Vendor: "nvidia",
		Type:   "gpu",
		Name:   "1080ti",
		Instances: []*structs.NodeDevice{
			{
				ID:      uuid.Generate(),
				Healthy: false,
			},
			{
				ID:      uuid.Generate(),
				Healthy: false,
			},
		},
	}
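
	// The cases below rely on healthy-instance counting (only healthy
	// instances satisfy a request's Count) and on unit-aware attribute
	// comparison (e.g. the 4 GiB memory attribute satisfies "> 1320.5 MB").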
	cases := []struct {
		Name             string
		Result           bool
		NodeDevices      []*structs.NodeDeviceResource
		RequestedDevices []*structs.RequestedDevice
	}{
		{
			Name:             "no devices on node",
			Result:           false,
			NodeDevices:      nil,
			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
		},
		{
			Name:             "no requested devices on empty node",
			Result:           true,
			NodeDevices:      nil,
			RequestedDevices: nil,
		},
		{
			Name:             "gpu devices by type",
			Result:           true,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
		},
		{
			Name:             "wrong devices by type",
			Result:           false,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{fpgaTypeReq},
		},
		{
			Name:             "devices by type unhealthy node",
			Result:           false,
			NodeDevices:      []*structs.NodeDeviceResource{nvidiaUnhealthy},
			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
		},
		{
			Name:             "gpu devices by vendor/type",
			Result:           true,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{gpuVendorTypeReq},
		},
		{
			Name:             "wrong devices by vendor/type",
			Result:           false,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{fpgaVendorTypeReq},
		},
		{
			Name:             "gpu devices by vendor/type/model",
			Result:           true,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{gpuFullReq},
		},
		{
			Name:             "wrong devices by vendor/type/model",
			Result:           false,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{fpgaFullReq},
		},
		{
			Name:             "too many requested",
			Result:           false,
			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{gpuTypeHighCountReq},
		},
		{
			Name:        "meets constraints requirement",
			Result:      true,
			NodeDevices: []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{
				{
					Name:  "nvidia/gpu",
					Count: 1,
					Constraints: []*structs.Constraint{
						{
							Operand: "=",
							LTarget: "${device.model}",
							RTarget: "1080ti",
						},
						{
							Operand: ">",
							LTarget: "${device.attr.memory}",
							RTarget: "1320.5 MB",
						},
						{
							Operand: "<=",
							LTarget: "${device.attr.pci_bandwidth}",
							RTarget: ".98 GiB/s",
						},
						{
							Operand: "=",
							LTarget: "${device.attr.cores_clock}",
							RTarget: "800MHz",
						},
					},
				},
			},
		},
		{
			Name:        "meets constraints requirement multiple count",
			Result:      true,
			NodeDevices: []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{
				{
					Name:  "nvidia/gpu",
					Count: 2,
					Constraints: []*structs.Constraint{
						{
							Operand: "=",
							LTarget: "${device.model}",
							RTarget: "1080ti",
						},
						{
							Operand: ">",
							LTarget: "${device.attr.memory}",
							RTarget: "1320.5 MB",
						},
						{
							Operand: "<=",
							LTarget: "${device.attr.pci_bandwidth}",
							RTarget: ".98 GiB/s",
						},
						{
							Operand: "=",
							LTarget: "${device.attr.cores_clock}",
							RTarget: "800MHz",
						},
					},
				},
			},
		},
		{
			Name:        "meets constraints requirement over count",
			Result:      false,
			NodeDevices: []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{
				{
					Name:  "nvidia/gpu",
					Count: 5,
					Constraints: []*structs.Constraint{
						{
							Operand: "=",
							LTarget: "${device.model}",
							RTarget: "1080ti",
						},
						{
							Operand: ">",
							LTarget: "${device.attr.memory}",
							RTarget: "1320.5 MB",
						},
						{
							Operand: "<=",
							LTarget: "${device.attr.pci_bandwidth}",
							RTarget: ".98 GiB/s",
						},
						{
							Operand: "=",
							LTarget: "${device.attr.cores_clock}",
							RTarget: "800MHz",
						},
					},
				},
			},
		},
		{
			Name:        "does not meet first constraint",
			Result:      false,
			NodeDevices: []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{
				{
					Name:  "nvidia/gpu",
					Count: 1,
					Constraints: []*structs.Constraint{
						{
							Operand: "=",
							LTarget: "${device.model}",
							RTarget: "2080ti",
						},
						{
							Operand: ">",
							LTarget: "${device.attr.memory}",
							RTarget: "1320.5 MB",
						},
						{
							Operand: "<=",
							LTarget: "${device.attr.pci_bandwidth}",
							RTarget: ".98 GiB/s",
						},
						{
							Operand: "=",
							LTarget: "${device.attr.cores_clock}",
							RTarget: "800MHz",
						},
					},
				},
			},
		},
		{
			Name:        "does not meet second constraint",
			Result:      false,
			NodeDevices: []*structs.NodeDeviceResource{nvidia},
			RequestedDevices: []*structs.RequestedDevice{
				{
					Name:  "nvidia/gpu",
					Count: 1,
					Constraints: []*structs.Constraint{
						{
							Operand: "=",
							LTarget: "${device.model}",
							RTarget: "1080ti",
						},
						{
							Operand: "<",
							LTarget: "${device.attr.memory}",
							RTarget: "1320.5 MB",
						},
						{
							Operand: "<=",
							LTarget: "${device.attr.pci_bandwidth}",
							RTarget: ".98 GiB/s",
						},
						{
							Operand: "=",
							LTarget: "${device.attr.cores_clock}",
							RTarget: "800MHz",
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			_, ctx := testContext(t)
			checker := NewDeviceChecker(ctx)
			checker.SetTaskGroup(getTg(c.RequestedDevices...))
			if act := checker.Feasible(getNode(c.NodeDevices...)); act != c.Result {
				t.Fatalf("got %v; want %v", act, c.Result)
			}
		})
	}
}
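
// TestCheckAttributeConstraint exercises typed attribute comparisons,
// including how nil (unset) operands are handled by each operator.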
func TestCheckAttributeConstraint(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal *psstructs.Attribute
		result     bool
	}
	cases := []tcase{
		{
			op:     "=",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("foo"),
			result: true,
		},
		{
			op:     "=",
			lVal:   nil,
			rVal:   nil,
			result: false,
		},
		{
			op:     "is",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("foo"),
			result: true,
		},
		{
			op:     "==",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("foo"),
			result: true,
		},
		{
			op:     "!=",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("foo"),
			result: false,
		},
		{
			op:     "!=",
			lVal:   nil,
			rVal:   psstructs.NewStringAttribute("foo"),
			result: true,
		},
		{
			op:     "!=",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   nil,
			result: true,
		},
		{
			op:     "!=",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("bar"),
			result: true,
		},
		{
			op:     "not",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("bar"),
			result: true,
		},
		{
			op:     structs.ConstraintVersion,
			lVal:   psstructs.NewStringAttribute("1.2.3"),
			rVal:   psstructs.NewStringAttribute("~> 1.0"),
			result: true,
		},
		{
			op:     structs.ConstraintRegex,
			lVal:   psstructs.NewStringAttribute("foobarbaz"),
			rVal:   psstructs.NewStringAttribute("[\\w]+"),
			result: true,
		},
		{
			op:     "<",
			lVal:   psstructs.NewStringAttribute("foo"),
			rVal:   psstructs.NewStringAttribute("bar"),
			result: false,
		},
		{
			op:     structs.ConstraintSetContains,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			rVal:   psstructs.NewStringAttribute("foo, bar "),
			result: true,
		},
		{
			op:     structs.ConstraintSetContainsAll,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			rVal:   psstructs.NewStringAttribute("foo, bar "),
			result: true,
		},
		{
			op:     structs.ConstraintSetContains,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			rVal:   psstructs.NewStringAttribute("foo,bam"),
			result: false,
		},
		{
			op:     structs.ConstraintSetContainsAny,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			rVal:   psstructs.NewStringAttribute("foo,bam"),
			result: true,
		},
		{
			op:     structs.ConstraintAttributeIsSet,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			result: true,
		},
		{
			op:     structs.ConstraintAttributeIsSet,
			lVal:   nil,
			result: false,
		},
		{
			op:     structs.ConstraintAttributeIsNotSet,
			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
			result: false,
		},
		{
			op:     structs.ConstraintAttributeIsNotSet,
			lVal:   nil,
			result: true,
		},
	}

	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkAttributeConstraint(ctx, tc.op, tc.lVal, tc.rVal, tc.lVal != nil, tc.rVal != nil); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}