package client

import (
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/client/allocrunner"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/stats"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
)
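
// gcConfig returns a GCConfig with fixed values used by the garbage
// collector tests in this file.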
func gcConfig() *GCConfig {
	return &GCConfig{
		DiskUsageThreshold:  80,
		InodeUsageThreshold: 70,
		Interval:            1 * time.Minute,
		ReservedDiskMB:      0,
		MaxAllocs:           100,
	}
}

// exitAllocRunner is a helper that updates the allocs on the given alloc
// runners to be terminal
func exitAllocRunner(runners ...AllocRunner) {
	for _, ar := range runners {
		terminalAlloc := ar.Alloc().Copy()
		terminalAlloc.DesiredStatus = structs.AllocDesiredStatusStop
		ar.Update(terminalAlloc)
	}
}

func TestIndexedGCAllocPQ(t *testing.T) {
	ci.Parallel(t)

	pq := NewIndexedGCAllocPQ()

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()
	ar3, cleanup3 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup3()
	ar4, cleanup4 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup4()

	pq.Push(ar1.Alloc().ID, ar1)
	pq.Push(ar2.Alloc().ID, ar2)
	pq.Push(ar3.Alloc().ID, ar3)
	pq.Push(ar4.Alloc().ID, ar4)

	allocID := pq.Pop().allocRunner.Alloc().ID
	if allocID != ar1.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar1.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar2.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar2.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar3.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar3.Alloc().ID, allocID)
	}

	allocID = pq.Pop().allocRunner.Alloc().ID
	if allocID != ar4.Alloc().ID {
		t.Fatalf("expected alloc %v, got %v", ar4.Alloc().ID, allocID)
	}

	gcAlloc := pq.Pop()
	if gcAlloc != nil {
		t.Fatalf("expected nil, got %v", gcAlloc)
	}
}

// MockAllocCounter implements the AllocCounter interface.
type MockAllocCounter struct {
	allocs int
}

func (m *MockAllocCounter) NumAllocs() int {
	return m.allocs
}
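
// MockStatsCollector stands in for the client's host stats collector in
// these tests. Each Stats call returns the next scripted set of disk stats;
// once the script is exhausted the last entry is returned on every further
// call, and nil is returned if no values were scripted.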
type MockStatsCollector struct {
	availableValues []uint64
	usedPercents    []float64
	inodePercents   []float64
	index           int
}

func (m *MockStatsCollector) Collect() error {
	return nil
}

func (m *MockStatsCollector) Stats() *stats.HostStats {
	if len(m.availableValues) == 0 {
		return nil
	}

	available := m.availableValues[m.index]
	usedPercent := m.usedPercents[m.index]
	inodePercent := m.inodePercents[m.index]

	if m.index < len(m.availableValues)-1 {
		m.index++
	}

	return &stats.HostStats{
		AllocDirStats: &stats.DiskStats{
			Available:         available,
			UsedPercent:       usedPercent,
			InodesUsedPercent: inodePercent,
		},
	}
}

func TestAllocGarbageCollector_MarkForCollection(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)

	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc == nil || gcAlloc.allocRunner != ar1 {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_Collect(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	gc.Collect(ar1.Alloc().ID)
	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc == nil || gcAlloc.allocRunner != ar2 {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_CollectAll(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	gc := NewAllocGarbageCollector(logger, &MockStatsCollector{}, &MockAllocCounter{}, gcConfig())

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	gc.CollectAll()
	gcAlloc := gc.allocRunners.Pop()
	if gcAlloc != nil {
		t.Fatalf("bad gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	// Make the stats collector report 200MB free, of which 20MB is reserved
	statsCollector.availableValues = []uint64{200 * MB}
	statsCollector.usedPercents = []float64{0}
	statsCollector.inodePercents = []float64{0}

	alloc := mock.Alloc()
	alloc.AllocatedResources.Shared.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// There is enough free disk for the new alloc, so no GC should have been
	// needed and both ARs should still be in the GC queue
	for i := 0; i < 2; i++ {
		if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
			t.Fatalf("err: %v", gcAlloc)
		}
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	// Make the stats collector report 80MB free on the first two calls and
	// 175MB on the third
	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 175 * MB}
	statsCollector.usedPercents = []float64{0, 0, 0}
	statsCollector.inodePercents = []float64{0, 0, 0}

	alloc := mock.Alloc()
	alloc.AllocatedResources.Shared.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Only one alloc should have been GC'd: once 175MB is reported free there
	// is room for the new alloc's 150MB, so the second runner stays queued
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	// Make the stats collector report 80MB free on the first two calls and
	// 95MB on the third, which is still not enough for the new alloc
	statsCollector.availableValues = []uint64{80 * MB, 80 * MB, 95 * MB}
	statsCollector.usedPercents = []float64{0, 0, 0}
	statsCollector.inodePercents = []float64{0, 0, 0}

	alloc := mock.Alloc()
	alloc.AllocatedResources.Shared.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing all the alloc runners
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}

func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)
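
	// No disk stats are scripted here, so the mock collector's Stats returns
	// nil and MakeRoomFor is expected to fall back to GC'ing queued allocs
	// until enough space has been freed for the new alloc.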
	alloc := mock.Alloc()
	alloc.AllocatedResources.Shared.DiskMB = 150
	if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We should be GC-ing one alloc
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}

// TestAllocGarbageCollector_MakeRoomFor_MaxAllocs asserts that when making room for new
// allocs, terminal allocs are GC'd until old_allocs + new_allocs <= limit
func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) {
	ci.Parallel(t)

	const maxAllocs = 6
	require := require.New(t)

	server, serverAddr, cleanupS := testServer(t, nil)
	defer cleanupS()
	testutil.WaitForLeader(t, server.RPC)

	client, cleanup := TestClient(t, func(c *config.Config) {
		c.GCMaxAllocs = maxAllocs
		c.GCDiskUsageThreshold = 100
		c.GCInodeUsageThreshold = 100
		c.GCParallelDestroys = 1
		c.GCInterval = time.Hour
		c.RPCHandler = server
		c.Servers = []string{serverAddr}
		c.ConsulConfig.ClientAutoJoin = new(bool)
	})
	defer cleanup()
	waitTilNodeReady(client, t)

	job := mock.Job()
	job.TaskGroups[0].Count = 1
	job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"run_for": "30s",
	}

	index := uint64(98)
	nextIndex := func() uint64 {
		index++
		return index
	}

	upsertJobFn := func(server *nomad.Server, j *structs.Job) {
		state := server.State()
		require.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), j))
		require.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID)))
	}

	// Insert the Job
	upsertJobFn(server, job)

	upsertAllocFn := func(server *nomad.Server, a *structs.Allocation) {
		state := server.State()
		require.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), []*structs.Allocation{a}))
	}

	upsertNewAllocFn := func(server *nomad.Server, j *structs.Job) *structs.Allocation {
		alloc := mock.Alloc()
		alloc.Job = j
		alloc.JobID = j.ID
		alloc.NodeID = client.NodeID()

		upsertAllocFn(server, alloc)

		return alloc.Copy()
	}

	var allocations []*structs.Allocation

	// Fill the node with allocations
	for i := 0; i < maxAllocs; i++ {
		allocations = append(allocations, upsertNewAllocFn(server, job))
	}

	// Wait until the allocations are ready
	testutil.WaitForResult(func() (bool, error) {
		ar := len(client.getAllocRunners())

		return ar == maxAllocs, fmt.Errorf("Expected %d allocs, got %d", maxAllocs, ar)
	}, func(err error) {
		t.Fatalf("Allocs did not start: %v", err)
	})

	// Mark the first three as terminal
	for i := 0; i < 3; i++ {
		allocations[i].DesiredStatus = structs.AllocDesiredStatusStop
		upsertAllocFn(server, allocations[i].Copy())
	}

	// Wait until the allocations are stopped
	testutil.WaitForResult(func() (bool, error) {
		ar := client.getAllocRunners()
		stopped := 0
		for _, r := range ar {
			if r.Alloc().TerminalStatus() {
				stopped++
			}
		}

		return stopped == 3, fmt.Errorf("Expected %d terminal allocs, got %d", 3, stopped)
	}, func(err error) {
		t.Fatalf("Allocs did not terminate: %v", err)
	})
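
	// The client is now at its GCMaxAllocs limit of 6 with three of the
	// allocs terminal, so each additional alloc placed below should force
	// exactly one more terminal alloc to be GC'd.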

	// Upsert a new allocation
	// This does not get appended to `allocations` as we do not use them again.
	upsertNewAllocFn(server, job)

	// A single allocation should be GC'd
	testutil.WaitForResult(func() (bool, error) {
		ar := client.getAllocRunners()
		destroyed := 0
		for _, r := range ar {
			if r.IsDestroyed() {
				destroyed++
			}
		}

		return destroyed == 1, fmt.Errorf("Expected %d gc'd ars, got %d", 1, destroyed)
	}, func(err error) {
		t.Fatalf("Allocs did not get GC'd: %v", err)
	})

	// Upsert a new allocation
	// This does not get appended to `allocations` as we do not use them again.
	upsertNewAllocFn(server, job)

	// 2 allocations should be GC'd
	testutil.WaitForResult(func() (bool, error) {
		ar := client.getAllocRunners()
		destroyed := 0
		for _, r := range ar {
			if r.IsDestroyed() {
				destroyed++
			}
		}

		return destroyed == 2, fmt.Errorf("Expected %d gc'd ars, got %d", 2, destroyed)
	}, func(err error) {
		t.Fatalf("Allocs did not get GC'd: %v", err)
	})

	// Check that all 8 allocations (the original 6 plus the 2 upserted above)
	// get run eventually
	testutil.WaitForResult(func() (bool, error) {
		ar := client.getAllocRunners()
		if len(ar) != 8 {
			return false, fmt.Errorf("expected 8 ARs, found %d: %v", len(ar), ar)
		}
		return true, nil
	}, func(err error) {
		require.NoError(err)
	})
}

func TestAllocGarbageCollector_UsageBelowThreshold(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	statsCollector.availableValues = []uint64{1000}
	statsCollector.usedPercents = []float64{20}
	statsCollector.inodePercents = []float64{10}

	if err := gc.keepUsageBelowThreshold(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// We shouldn't GC any of the allocs since the used percent values are below
	// threshold
	for i := 0; i < 2; i++ {
		if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
			t.Fatalf("err: %v", gcAlloc)
		}
	}
}

func TestAllocGarbageCollector_UsedPercentThreshold(t *testing.T) {
	ci.Parallel(t)

	logger := testlog.HCLogger(t)
	statsCollector := &MockStatsCollector{}
	conf := gcConfig()
	conf.ReservedDiskMB = 20
	gc := NewAllocGarbageCollector(logger, statsCollector, &MockAllocCounter{}, conf)

	ar1, cleanup1 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup1()
	ar2, cleanup2 := allocrunner.TestAllocRunnerFromAlloc(t, mock.Alloc())
	defer cleanup2()

	go ar1.Run()
	go ar2.Run()

	gc.MarkForCollection(ar1.Alloc().ID, ar1)
	gc.MarkForCollection(ar2.Alloc().ID, ar2)

	// Exit the alloc runners
	exitAllocRunner(ar1, ar2)

	statsCollector.availableValues = []uint64{1000, 800}
	statsCollector.usedPercents = []float64{85, 60}
	statsCollector.inodePercents = []float64{50, 30}

	if err := gc.keepUsageBelowThreshold(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Only one of the alloc runners should be GC'd since on the second stats
	// call the used percent (60) is back below the 80 threshold.
	if gcAlloc := gc.allocRunners.Pop(); gcAlloc == nil {
		t.Fatalf("err: %v", gcAlloc)
	}

	if gcAlloc := gc.allocRunners.Pop(); gcAlloc != nil {
		t.Fatalf("gcAlloc: %v", gcAlloc)
	}
}