open-nomad/nomad/structs/funcs_test.go

package structs
import (
"encoding/base64"
"errors"
"fmt"
"testing"
lru "github.com/hashicorp/golang-lru"
"github.com/hashicorp/nomad/ci"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRemoveAllocs(t *testing.T) {
ci.Parallel(t)
l := []*Allocation{
{ID: "foo"},
{ID: "bar"},
{ID: "baz"},
{ID: "zip"},
}
out := RemoveAllocs(l, []*Allocation{l[1], l[3]})
if len(out) != 2 {
t.Fatalf("bad: %#v", out)
}
if out[0].ID != "foo" && out[1].ID != "baz" {
t.Fatalf("bad: %#v", out)
}
}
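// TestFilterTerminalAllocs checks that terminal allocations are filtered out
// of the returned list and that, for each allocation name, the terminal
// allocation with the highest CreateIndex is the one reported back.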
func TestFilterTerminalAllocs(t *testing.T) {
ci.Parallel(t)
l := []*Allocation{
{
ID: "bar",
Name: "myname1",
DesiredStatus: AllocDesiredStatusEvict,
},
{ID: "baz", DesiredStatus: AllocDesiredStatusStop},
{
ID: "foo",
DesiredStatus: AllocDesiredStatusRun,
ClientStatus: AllocClientStatusPending,
},
{
ID: "bam",
Name: "myname",
DesiredStatus: AllocDesiredStatusRun,
ClientStatus: AllocClientStatusComplete,
CreateIndex: 5,
},
{
ID: "lol",
Name: "myname",
DesiredStatus: AllocDesiredStatusRun,
ClientStatus: AllocClientStatusComplete,
CreateIndex: 2,
},
}
out, terminalAllocs := FilterTerminalAllocs(l)
if len(out) != 1 {
t.Fatalf("bad: %#v", out)
}
if out[0].ID != "foo" {
t.Fatalf("bad: %#v", out)
}
if len(terminalAllocs) != 3 {
for _, o := range terminalAllocs {
fmt.Printf("%#v \n", o)
}
t.Fatalf("bad: %#v", terminalAllocs)
}
if terminalAllocs["myname"].ID != "bam" {
t.Fatalf("bad: %#v", terminalAllocs["myname"])
}
}
// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
ci.Parallel(t)
n := &Node{
Resources: &Resources{
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "10.0.0.0/8",
MBits: 100,
},
},
},
}
a1 := &Allocation{
Job: &Job{
TaskGroups: []*TaskGroup{
{
Name: "web",
EphemeralDisk: DefaultEphemeralDisk(),
},
},
},
TaskResources: map[string]*Resources{
"web": {
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 8000, 80, ""}},
},
},
},
},
}
// Should fit one allocation
fit, dim, _, err := AllocsFit(n, []*Allocation{a1}, nil, false)
if err != nil {
t.Fatalf("err: %v", err)
}
if !fit {
t.Fatalf("Bad: %s", dim)
}
// Should not fit second allocation
fit, _, _, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
if err != nil {
t.Fatalf("err: %v", err)
}
if fit {
t.Fatalf("Bad")
}
}
// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_Old(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
n := &Node{
Resources: &Resources{
CPU: 2000,
MemoryMB: 2048,
DiskMB: 10000,
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "10.0.0.0/8",
MBits: 100,
},
},
},
Reserved: &Resources{
CPU: 1000,
MemoryMB: 1024,
DiskMB: 5000,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 80, 0, ""}},
},
},
},
}
a1 := &Allocation{
Resources: &Resources{
CPU: 1000,
MemoryMB: 1024,
DiskMB: 5000,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 8000, 80, ""}},
},
},
},
}
// Should fit one allocation
fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
require.NoError(err)
require.True(fit)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
// Should not fit second allocation
fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
require.NoError(err)
require.False(fit)
require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}
// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
n := &Node{
Resources: &Resources{
CPU: 2000,
MemoryMB: 2048,
DiskMB: 10000,
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "10.0.0.0/8",
MBits: 100,
},
},
},
Reserved: &Resources{
CPU: 1000,
MemoryMB: 1024,
DiskMB: 5000,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 80, 0, ""}},
},
},
},
}
a1 := &Allocation{
Resources: &Resources{
CPU: 1000,
MemoryMB: 1024,
DiskMB: 5000,
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 8000, 0, ""}},
},
},
},
}
// Should fit one allocation
fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
require.NoError(err)
require.True(fit)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
// Should fit second allocation since it is terminal
a2 := a1.Copy()
a2.DesiredStatus = AllocDesiredStatusStop
fit, _, used, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
require.NoError(err)
require.True(fit)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}
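// TestAllocsFit exercises fitting against the NodeResources/AllocatedResources
// structs, including reserved host ports and per-core reservations (the
// "cores" dimension).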
func TestAllocsFit(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
n := &Node{
NodeResources: &NodeResources{
Cpu: NodeCpuResources{
CpuShares: 2000,
TotalCpuCores: 2,
ReservableCpuCores: []uint16{0, 1},
},
Memory: NodeMemoryResources{
MemoryMB: 2048,
},
Disk: NodeDiskResources{
DiskMB: 10000,
},
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "10.0.0.0/8",
MBits: 100,
},
},
NodeNetworks: []*NodeNetworkResource{
{
Mode: "host",
Device: "eth0",
Addresses: []NodeNetworkAddress{
{
Address: "10.0.0.1",
},
},
},
},
},
ReservedResources: &NodeReservedResources{
Cpu: NodeReservedCpuResources{
CpuShares: 1000,
},
Memory: NodeReservedMemoryResources{
MemoryMB: 1024,
},
Disk: NodeReservedDiskResources{
DiskMB: 5000,
},
Networks: NodeReservedNetworkResources{
ReservedHostPorts: "80",
},
},
}
a1 := &Allocation{
AllocatedResources: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 1000,
ReservedCores: []uint16{},
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 5000,
Networks: Networks{
{
Mode: "host",
IP: "10.0.0.1",
ReservedPorts: []Port{{"main", 8000, 0, ""}},
},
},
Ports: AllocatedPorts{
{
Label: "main",
Value: 8000,
HostIP: "10.0.0.1",
},
},
},
},
}
// Should fit one allocation
fit, dim, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
require.NoError(err)
require.True(fit, "failed for dimension %q", dim)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
// Should not fit second allocation
fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
require.NoError(err)
require.False(fit)
require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
a2 := &Allocation{
AllocatedResources: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 500,
ReservedCores: []uint16{0},
},
Memory: AllocatedMemoryResources{
MemoryMB: 512,
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 1000,
Networks: Networks{
{
Mode: "host",
IP: "10.0.0.1",
},
},
},
},
}
// Should fit one allocation
fit, dim, used, err = AllocsFit(n, []*Allocation{a2}, nil, false)
require.NoError(err)
require.True(fit, "failed for dimension %q", dim)
require.EqualValues(500, used.Flattened.Cpu.CpuShares)
require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
require.EqualValues(512, used.Flattened.Memory.MemoryMB)
// Should not fit second allocation
fit, dim, used, err = AllocsFit(n, []*Allocation{a2, a2}, nil, false)
require.NoError(err)
require.False(fit)
require.EqualValues("cores", dim)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}
func TestAllocsFit_TerminalAlloc(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
n := &Node{
NodeResources: &NodeResources{
Cpu: NodeCpuResources{
CpuShares: 2000,
},
Memory: NodeMemoryResources{
MemoryMB: 2048,
},
Disk: NodeDiskResources{
DiskMB: 10000,
},
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "10.0.0.0/8",
IP: "10.0.0.1",
MBits: 100,
},
},
},
ReservedResources: &NodeReservedResources{
Cpu: NodeReservedCpuResources{
CpuShares: 1000,
},
Memory: NodeReservedMemoryResources{
MemoryMB: 1024,
},
Disk: NodeReservedDiskResources{
DiskMB: 5000,
},
Networks: NodeReservedNetworkResources{
ReservedHostPorts: "80",
},
},
}
a1 := &Allocation{
AllocatedResources: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 1000,
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
},
Networks: []*NetworkResource{
{
Device: "eth0",
IP: "10.0.0.1",
MBits: 50,
ReservedPorts: []Port{{"main", 8000, 80, ""}},
},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 5000,
},
},
}
// Should fit one allocation
fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
require.NoError(err)
require.True(fit)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
// Should fit second allocation since it is terminal
a2 := a1.Copy()
a2.DesiredStatus = AllocDesiredStatusStop
fit, dim, used, err := AllocsFit(n, []*Allocation{a1, a2}, nil, false)
require.NoError(err)
require.True(fit, dim)
require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}
// Tests that AllocsFit detects device collisions
func TestAllocsFit_Devices(t *testing.T) {
ci.Parallel(t)
require := require.New(t)
n := MockNvidiaNode()
a1 := &Allocation{
AllocatedResources: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 1000,
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
},
Devices: []*AllocatedDeviceResource{
{
Type: "gpu",
Vendor: "nvidia",
Name: "1080ti",
DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID},
},
},
},
},
Shared: AllocatedSharedResources{
DiskMB: 5000,
},
},
}
a2 := a1.Copy()
a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 1000,
},
Memory: AllocatedMemoryResources{
MemoryMB: 1024,
},
Devices: []*AllocatedDeviceResource{
{
Type: "gpu",
Vendor: "nvidia",
Name: "1080ti",
DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID}, // Use the same ID
},
},
}
// Should fit one allocation
fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true)
require.NoError(err)
require.True(fit)
// Should not fit second allocation
fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true)
require.NoError(err)
require.False(fit)
require.Equal("device oversubscribed", msg)
// The second allocation should not fit, but the collision is not detected
// because device checking is disabled
fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
require.NoError(err)
require.True(fit)
}
// TestAllocsFit_MemoryOversubscription asserts that only the reserved memory
// (MemoryMB) counts against node capacity; MemoryMaxMB may oversubscribe it
func TestAllocsFit_MemoryOversubscription(t *testing.T) {
ci.Parallel(t)
n := &Node{
NodeResources: &NodeResources{
Cpu: NodeCpuResources{
CpuShares: 2000,
},
Memory: NodeMemoryResources{
MemoryMB: 2048,
},
},
}
a1 := &Allocation{
AllocatedResources: &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: 100,
},
Memory: AllocatedMemoryResources{
MemoryMB: 1000,
MemoryMaxMB: 4000,
},
},
},
},
}
// Should fit one allocation
fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
require.NoError(t, err)
require.True(t, fit)
require.EqualValues(t, 100, used.Flattened.Cpu.CpuShares)
require.EqualValues(t, 1000, used.Flattened.Memory.MemoryMB)
require.EqualValues(t, 4000, used.Flattened.Memory.MemoryMaxMB)
// Should fit second allocation
fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
require.NoError(t, err)
require.True(t, fit)
require.EqualValues(t, 200, used.Flattened.Cpu.CpuShares)
require.EqualValues(t, 2000, used.Flattened.Memory.MemoryMB)
require.EqualValues(t, 8000, used.Flattened.Memory.MemoryMaxMB)
// Should not fit a third allocation
fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1, a1}, nil, false)
require.NoError(t, err)
require.False(t, fit)
require.EqualValues(t, 300, used.Flattened.Cpu.CpuShares)
require.EqualValues(t, 3000, used.Flattened.Memory.MemoryMB)
require.EqualValues(t, 12000, used.Flattened.Memory.MemoryMaxMB)
}
// COMPAT(0.11): Remove in 0.11
func TestScoreFitBinPack_Old(t *testing.T) {
ci.Parallel(t)
node := &Node{}
node.Resources = &Resources{
CPU: 4096,
MemoryMB: 8192,
}
node.Reserved = &Resources{
CPU: 2048,
MemoryMB: 4096,
}
// Test a perfect fit
util := &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 2048,
},
Memory: AllocatedMemoryResources{
MemoryMB: 4096,
},
},
}
score := ScoreFitBinPack(node, util)
if score != 18.0 {
t.Fatalf("bad: %v", score)
}
// Test the worst fit
util = &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 0,
},
Memory: AllocatedMemoryResources{
MemoryMB: 0,
},
},
}
score = ScoreFitBinPack(node, util)
if score != 0.0 {
t.Fatalf("bad: %v", score)
}
// Test a mid-case scenario
util = &ComparableResources{
Flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{
CpuShares: 1024,
},
Memory: AllocatedMemoryResources{
MemoryMB: 2048,
},
},
}
score = ScoreFitBinPack(node, util)
if score < 10.0 || score > 16.0 {
t.Fatalf("bad: %v", score)
}
}
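// Note: the bin-pack and spread scores are complements of one another; for any
// utilization they are expected to sum to the maximum score of 18, which the
// table-driven cases below assert.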
func TestScoreFitBinPack(t *testing.T) {
ci.Parallel(t)
node := &Node{}
node.NodeResources = &NodeResources{
Cpu: NodeCpuResources{
CpuShares: 4096,
},
Memory: NodeMemoryResources{
MemoryMB: 8192,
},
}
node.ReservedResources = &NodeReservedResources{
Cpu: NodeReservedCpuResources{
CpuShares: 2048,
},
Memory: NodeReservedMemoryResources{
MemoryMB: 4096,
},
}
cases := []struct {
name string
flattened AllocatedTaskResources
binPackScore float64
spreadScore float64
}{
{
name: "almost filled node, but with just enough hole",
flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 2048},
Memory: AllocatedMemoryResources{MemoryMB: 4096},
},
binPackScore: 18,
spreadScore: 0,
},
{
name: "unutilized node",
flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 0},
Memory: AllocatedMemoryResources{MemoryMB: 0},
},
binPackScore: 0,
spreadScore: 18,
},
{
name: "mid-case scnario",
flattened: AllocatedTaskResources{
Cpu: AllocatedCpuResources{CpuShares: 1024},
Memory: AllocatedMemoryResources{MemoryMB: 2048},
},
binPackScore: 13.675,
spreadScore: 4.325,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
util := &ComparableResources{Flattened: c.flattened}
binPackScore := ScoreFitBinPack(node, util)
require.InDelta(t, c.binPackScore, binPackScore, 0.001, "binpack score")
spreadScore := ScoreFitSpread(node, util)
require.InDelta(t, c.spreadScore, spreadScore, 0.001, "spread score")
require.InDelta(t, 18, binPackScore+spreadScore, 0.001, "score sum")
})
}
}
func TestACLPolicyListHash(t *testing.T) {
ci.Parallel(t)
h1 := ACLPolicyListHash(nil)
assert.NotEqual(t, "", h1)
p1 := &ACLPolicy{
Name: fmt.Sprintf("policy-%s", uuid.Generate()),
Description: "Super cool policy!",
Rules: `
namespace "default" {
policy = "write"
}
node {
policy = "read"
}
agent {
policy = "read"
}
`,
CreateIndex: 10,
ModifyIndex: 20,
}
h2 := ACLPolicyListHash([]*ACLPolicy{p1})
assert.NotEqual(t, "", h2)
assert.NotEqual(t, h1, h2)
// Create P2 as copy of P1 with new name
p2 := &ACLPolicy{}
*p2 = *p1
p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())
h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
assert.NotEqual(t, "", h3)
assert.NotEqual(t, h2, h3)
h4 := ACLPolicyListHash([]*ACLPolicy{p2})
assert.NotEqual(t, "", h4)
assert.NotEqual(t, h3, h4)
// ModifyIndex should change the hash
p2.ModifyIndex++
h5 := ACLPolicyListHash([]*ACLPolicy{p2})
assert.NotEqual(t, "", h5)
assert.NotEqual(t, h4, h5)
}
func TestCompileACLObject(t *testing.T) {
ci.Parallel(t)
p1 := &ACLPolicy{
Name: fmt.Sprintf("policy-%s", uuid.Generate()),
Description: "Super cool policy!",
Rules: `
namespace "default" {
policy = "write"
}
node {
policy = "read"
}
agent {
policy = "read"
}
`,
CreateIndex: 10,
ModifyIndex: 20,
}
// Create P2 as copy of P1 with new name
p2 := &ACLPolicy{}
*p2 = *p1
p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())
// Create a small cache
cache, err := lru.New2Q(16)
assert.Nil(t, err)
// Test compilation
aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
assert.Nil(t, err)
assert.NotNil(t, aclObj)
// Should get the same object
aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
assert.Nil(t, err)
if aclObj != aclObj2 {
t.Fatalf("expected the same object")
}
// Should get another object
aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
assert.Nil(t, err)
assert.NotNil(t, aclObj3)
if aclObj == aclObj3 {
t.Fatalf("unexpected same object")
}
// Should be order independent
aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
assert.Nil(t, err)
assert.NotNil(t, aclObj4)
if aclObj3 != aclObj4 {
t.Fatalf("expected same object")
}
}
// TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP
// headers and CompareMigrateToken works as expected.
func TestGenerateMigrateToken(t *testing.T) {
ci.Parallel(t)
assert := assert.New(t)
allocID := uuid.Generate()
nodeSecret := uuid.Generate()
token, err := GenerateMigrateToken(allocID, nodeSecret)
assert.Nil(err)
_, err = base64.URLEncoding.DecodeString(token)
assert.Nil(err)
assert.True(CompareMigrateToken(allocID, nodeSecret, token))
assert.False(CompareMigrateToken("x", nodeSecret, token))
assert.False(CompareMigrateToken(allocID, "x", token))
assert.False(CompareMigrateToken(allocID, nodeSecret, "x"))
token2, err := GenerateMigrateToken("x", nodeSecret)
assert.Nil(err)
assert.False(CompareMigrateToken(allocID, nodeSecret, token2))
assert.True(CompareMigrateToken("x", nodeSecret, token2))
}
func TestMergeMultierrorWarnings(t *testing.T) {
ci.Parallel(t)
var errs []error
// empty
str := MergeMultierrorWarnings(errs...)
require.Equal(t, "", str)
// non-empty
errs = []error{
errors.New("foo"),
nil,
errors.New("bar"),
}
str = MergeMultierrorWarnings(errs...)
require.Equal(t, "2 warning(s):\n\n* foo\n* bar", str)
}
func TestVaultPoliciesSet(t *testing.T) {
ci.Parallel(t)
input := map[string]map[string]*Vault{
"tg1": {
"task1": {
Policies: []string{"policy1-1"},
},
"task2": {
Policies: []string{"policy1-2"},
},
},
"tg2": {
"task1": {
Policies: []string{"policy2"},
},
"task2": {
Policies: []string{"policy2"},
},
},
"tg3": {
"task1": {
Policies: []string{"policy3-1"},
},
},
"tg4": {
"task1": nil,
},
"tg5": {
"task1": {
Policies: []string{"policy2"},
},
},
"tg6": {
"task1": {},
},
"tg7": {
"task1": {
Policies: []string{"policy7", "policy7"},
},
},
"tg8": {
"task1": {
Policies: []string{"policy8-1-1", "policy8-1-2"},
},
},
}
expected := []string{
"policy1-1",
"policy1-2",
"policy2",
"policy3-1",
"policy7",
"policy8-1-1",
"policy8-1-2",
}
got := VaultPoliciesSet(input)
require.ElementsMatch(t, expected, got)
}
func TestVaultNamespaceSet(t *testing.T) {
ci.Parallel(t)
input := map[string]map[string]*Vault{
"tg1": {
"task1": {
Namespace: "ns1-1",
},
"task2": {
Namespace: "ns1-2",
},
},
"tg2": {
"task1": {
Namespace: "ns2",
},
"task2": {
Namespace: "ns2",
},
},
"tg3": {
"task1": {
Namespace: "ns3-1",
},
},
"tg4": {
"task1": nil,
},
"tg5": {
"task1": {
Namespace: "ns2",
},
},
"tg6": {
"task1": {},
},
}
expected := []string{
"ns1-1",
"ns1-2",
"ns2",
"ns3-1",
}
got := VaultNamespaceSet(input)
require.ElementsMatch(t, expected, got)
}
// TestParsePortRanges asserts ParsePortRanges errors on invalid port ranges.
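// A valid specification is a comma-separated list of ports and port ranges,
// for example "22,80,8000-8100" (the same format used by ReservedHostPorts
// above); only invalid specifications are exercised here.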
func TestParsePortRanges(t *testing.T) {
ci.Parallel(t)
cases := []struct {
name string
spec string
err string
}{
{
name: "UnmatchedDash",
spec: "-1",
err: `strconv.ParseUint: parsing "": invalid syntax`,
},
{
name: "Zero",
spec: "0",
err: "port must be > 0",
},
{
name: "TooBig",
spec: fmt.Sprintf("1-%d", MaxValidPort+1),
err: "port must be < 65536 but found 65537",
},
{
name: "WayTooBig", // would OOM if not caught early enough
spec: "9223372036854775807", // (2**63)-1
err: "port must be < 65536 but found 9223372036854775807",
},
}
for i := range cases {
tc := cases[i]
t.Run(tc.name, func(t *testing.T) {
results, err := ParsePortRanges(tc.spec)
require.Nil(t, results)
require.EqualError(t, err, tc.err)
})
}
}