2016-12-12 06:33:12 +00:00
|
|
|
package client
|
|
|
|
|
|
|
|
import (
|
|
|
|
"container/heap"
|
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2016-12-12 07:08:13 +00:00
|
|
|
"github.com/hashicorp/nomad/client/stats"
|
2016-12-12 06:33:12 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
|
|
|
)
|
|
|
|
|
2016-12-16 07:54:54 +00:00
|
|
|
const (
	// MB is a constant which converts values in bytes to MB
	MB = 1024 * 1024
)
|
2016-12-12 06:33:12 +00:00
|
|
|
|
2017-01-31 23:32:20 +00:00
|
|
|
// GCConfig allows changing the behaviour of the garbage collector
type GCConfig struct {
	// MaxAllocs is the maximum number of allocations to track before a GC
	// is triggered.
	MaxAllocs int

	// DiskUsageThreshold is the disk usage percentage of the allocation
	// directory above which allocations are garbage collected.
	DiskUsageThreshold float64

	// InodeUsageThreshold is the inode usage percentage of the allocation
	// directory above which allocations are garbage collected.
	InodeUsageThreshold float64

	// Interval is how often the periodic garbage collector runs.
	Interval time.Duration

	// ReservedDiskMB is the amount of disk (in MB) excluded from the
	// space considered available for new allocations.
	ReservedDiskMB int

	// ParallelDestroys bounds how many alloc runners may be destroyed
	// concurrently; values <= 0 are coerced to 1 by the constructor.
	ParallelDestroys int
}
|
|
|
|
|
2017-05-11 00:39:45 +00:00
|
|
|
// AllocCounter is used by AllocGarbageCollector to discover how many
// allocations a node has and is generally fulfilled by the Client.
type AllocCounter interface {
	// NumAllocs returns the number of allocations the node is tracking.
	NumAllocs() int
}
|
|
|
|
|
2016-12-12 06:33:12 +00:00
|
|
|
// AllocGarbageCollector garbage collects terminated allocations on a node
type AllocGarbageCollector struct {
	// allocRunners is the priority queue of alloc runners marked for GC,
	// ordered by the time they were marked (oldest popped first).
	allocRunners *IndexedGCAllocPQ

	// statsCollector provides host/alloc-dir disk usage statistics.
	statsCollector stats.NodeStatsCollector

	// allocCounter reports how many allocations the client is tracking.
	allocCounter AllocCounter

	// config controls thresholds, interval, and destroy parallelism.
	config *GCConfig

	logger *log.Logger

	// destroyCh is a buffered channel used as a semaphore bounding the
	// number of concurrent alloc runner destroys (cap = ParallelDestroys).
	destroyCh chan struct{}

	// shutdownCh is closed by Stop to terminate the GC loops.
	shutdownCh chan struct{}
}
|
|
|
|
|
|
|
|
// NewAllocGarbageCollector returns a garbage collector for terminated
|
2017-05-11 00:39:45 +00:00
|
|
|
// allocations on a node. Must call Run() in a goroutine enable periodic
|
|
|
|
// garbage collection.
|
|
|
|
func NewAllocGarbageCollector(logger *log.Logger, statsCollector stats.NodeStatsCollector, ac AllocCounter, config *GCConfig) *AllocGarbageCollector {
|
2017-03-11 00:27:00 +00:00
|
|
|
// Require at least 1 to make progress
|
|
|
|
if config.ParallelDestroys <= 0 {
|
|
|
|
logger.Printf("[WARN] client: garbage collector defaulting parallism to 1 due to invalid input value of %d", config.ParallelDestroys)
|
|
|
|
config.ParallelDestroys = 1
|
|
|
|
}
|
|
|
|
|
2016-12-16 07:54:54 +00:00
|
|
|
gc := &AllocGarbageCollector{
|
2016-12-12 07:08:13 +00:00
|
|
|
allocRunners: NewIndexedGCAllocPQ(),
|
|
|
|
statsCollector: statsCollector,
|
2017-05-11 00:39:45 +00:00
|
|
|
allocCounter: ac,
|
2017-01-31 23:32:20 +00:00
|
|
|
config: config,
|
2016-12-12 07:08:13 +00:00
|
|
|
logger: logger,
|
2017-03-11 00:27:00 +00:00
|
|
|
destroyCh: make(chan struct{}, config.ParallelDestroys),
|
2016-12-16 07:54:54 +00:00
|
|
|
shutdownCh: make(chan struct{}),
|
2016-12-12 06:33:12 +00:00
|
|
|
}
|
2016-12-16 07:54:54 +00:00
|
|
|
|
|
|
|
return gc
|
|
|
|
}
|
|
|
|
|
2017-05-11 00:39:45 +00:00
|
|
|
// Run the periodic garbage collector.
|
|
|
|
func (a *AllocGarbageCollector) Run() {
|
2017-01-31 23:32:20 +00:00
|
|
|
ticker := time.NewTicker(a.config.Interval)
|
2016-12-16 07:54:54 +00:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
if err := a.keepUsageBelowThreshold(); err != nil {
|
2016-12-20 01:53:11 +00:00
|
|
|
a.logger.Printf("[ERR] client: error garbage collecting allocation: %v", err)
|
2016-12-16 07:54:54 +00:00
|
|
|
}
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
ticker.Stop()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// keepUsageBelowThreshold collects disk usage information and garbage collects
// allocations to make disk space available. It loops, destroying one terminal
// allocation per iteration, until usage drops below all configured thresholds
// or there is nothing left to collect.
func (a *AllocGarbageCollector) keepUsageBelowThreshold() error {
	for {
		// Bail out promptly if the collector has been stopped.
		select {
		case <-a.shutdownCh:
			return nil
		default:
		}

		// Check if we have enough free space
		err := a.statsCollector.Collect()
		if err != nil {
			return err
		}

		// See if we are below thresholds for used disk space and inode usage
		// TODO(diptanu) figure out why this is nil
		stats := a.statsCollector.Stats()
		if stats == nil {
			break
		}

		diskStats := stats.AllocDirStats
		if diskStats == nil {
			break
		}

		// A non-empty reason means a GC pass is required this iteration.
		reason := ""

		switch {
		case diskStats.UsedPercent > a.config.DiskUsageThreshold:
			reason = fmt.Sprintf("disk usage of %.0f is over gc threshold of %.0f",
				diskStats.UsedPercent, a.config.DiskUsageThreshold)
		case diskStats.InodesUsedPercent > a.config.InodeUsageThreshold:
			reason = fmt.Sprintf("inode usage of %.0f is over gc threshold of %.0f",
				diskStats.InodesUsedPercent, a.config.InodeUsageThreshold)
		case a.numAllocs() > a.config.MaxAllocs:
			reason = fmt.Sprintf("number of allocations is over the limit (%d)", a.config.MaxAllocs)
		}

		// No reason to gc, exit
		if reason == "" {
			break
		}

		// Collect an allocation
		gcAlloc := a.allocRunners.Pop()
		if gcAlloc == nil {
			a.logger.Printf("[WARN] client: garbage collection due to %s skipped because no terminal allocations", reason)
			break
		}

		// Destroy the alloc runner and wait until it exits
		a.destroyAllocRunner(gcAlloc.allocRunner, reason)
	}
	return nil
}
|
|
|
|
|
|
|
|
// destroyAllocRunner is used to destroy an allocation runner. It will acquire a
|
|
|
|
// lock to restrict parallelism and then destroy the alloc runner, returning
|
|
|
|
// once the allocation has been destroyed.
|
2017-05-11 00:39:45 +00:00
|
|
|
func (a *AllocGarbageCollector) destroyAllocRunner(ar *AllocRunner, reason string) {
|
|
|
|
id := "<nil>"
|
|
|
|
if alloc := ar.Alloc(); alloc != nil {
|
|
|
|
id = alloc.ID
|
|
|
|
}
|
|
|
|
a.logger.Printf("[INFO] client: garbage collecting allocation %s due to %s", id, reason)
|
|
|
|
|
2017-03-11 00:27:00 +00:00
|
|
|
// Acquire the destroy lock
|
|
|
|
select {
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
return
|
2017-03-14 17:45:15 +00:00
|
|
|
case a.destroyCh <- struct{}{}:
|
2017-03-11 00:27:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ar.Destroy()
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-ar.WaitCh():
|
|
|
|
case <-a.shutdownCh:
|
|
|
|
}
|
|
|
|
|
|
|
|
a.logger.Printf("[DEBUG] client: garbage collected %q", ar.Alloc().ID)
|
|
|
|
|
2017-03-14 17:45:15 +00:00
|
|
|
// Release the lock
|
|
|
|
<-a.destroyCh
|
2016-12-16 07:54:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop shuts down the garbage collector by closing shutdownCh, which every
// GC loop and in-flight destroy selects on.
func (a *AllocGarbageCollector) Stop() {
	close(a.shutdownCh)
}
|
|
|
|
|
|
|
|
// Collect garbage collects a single allocation on a node
|
|
|
|
func (a *AllocGarbageCollector) Collect(allocID string) error {
|
|
|
|
gcAlloc, err := a.allocRunners.Remove(allocID)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to collect allocation %q: %v", allocID, err)
|
|
|
|
}
|
2017-05-11 00:39:45 +00:00
|
|
|
a.destroyAllocRunner(gcAlloc.allocRunner, "forced collection")
|
2016-12-12 06:33:12 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CollectAll garbage collects all terminated allocations on a node
func (a *AllocGarbageCollector) CollectAll() error {
	for {
		// Stop early if the collector has been shut down.
		select {
		case <-a.shutdownCh:
			return nil
		default:
		}

		// Pop returns nil once nothing is left marked for collection.
		gcAlloc := a.allocRunners.Pop()
		if gcAlloc == nil {
			break
		}

		// Destroy asynchronously; concurrency is bounded by the
		// destroyCh semaphore inside destroyAllocRunner.
		go a.destroyAllocRunner(gcAlloc.allocRunner, "forced full collection")
	}
	return nil
}
|
|
|
|
|
|
|
|
// MakeRoomFor garbage collects enough number of allocations in the terminal
// state to make room for new allocations. It first GCs down to the MaxAllocs
// limit, then frees disk space until the new allocations' combined DiskMB fits.
func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) error {
	// GC allocs until below the max limit + the new allocations
	max := a.config.MaxAllocs - len(allocations)
	for a.numAllocs() > max {
		select {
		case <-a.shutdownCh:
			return nil
		default:
		}

		gcAlloc := a.allocRunners.Pop()
		if gcAlloc == nil {
			// It's fine if we can't lower below the limit here as
			// we'll keep trying to drop below the limit with each
			// periodic gc
			break
		}

		// Destroy the alloc runner and wait until it exits
		a.destroyAllocRunner(gcAlloc.allocRunner, "new allocations")
	}

	// Sum the resources the incoming allocations will need.
	totalResource := &structs.Resources{}
	for _, alloc := range allocations {
		if err := totalResource.Add(alloc.Resources); err != nil {
			return err
		}
	}

	// If the host has enough free space to accommodate the new allocations then
	// we don't need to garbage collect terminated allocations
	if hostStats := a.statsCollector.Stats(); hostStats != nil {
		// Space below ReservedDiskMB is not counted as available.
		var availableForAllocations uint64
		if hostStats.AllocDirStats.Available < uint64(a.config.ReservedDiskMB*MB) {
			availableForAllocations = 0
		} else {
			availableForAllocations = hostStats.AllocDirStats.Available - uint64(a.config.ReservedDiskMB*MB)
		}
		if uint64(totalResource.DiskMB*MB) < availableForAllocations {
			return nil
		}
	}

	// diskCleared tracks freed MB for the fallback path when stats
	// collection is unavailable.
	var diskCleared int
	for {
		select {
		case <-a.shutdownCh:
			return nil
		default:
		}

		// Collect host stats and see if we still need to remove older
		// allocations
		var allocDirStats *stats.DiskStats
		if err := a.statsCollector.Collect(); err == nil {
			if hostStats := a.statsCollector.Stats(); hostStats != nil {
				allocDirStats = hostStats.AllocDirStats
			}
		}

		if allocDirStats != nil {
			if allocDirStats.Available >= uint64(totalResource.DiskMB*MB) {
				break
			}
		} else {
			// Falling back to a simpler model to know if we have enough disk
			// space if stats collection fails
			if diskCleared >= totalResource.DiskMB {
				break
			}
		}

		gcAlloc := a.allocRunners.Pop()
		if gcAlloc == nil {
			break
		}

		ar := gcAlloc.allocRunner
		alloc := ar.Alloc()

		// Destroy the alloc runner and wait until it exits
		a.destroyAllocRunner(ar, fmt.Sprintf("freeing %d MB for new allocations", alloc.Resources.DiskMB))

		// Call stats collect again
		diskCleared += alloc.Resources.DiskMB
	}
	return nil
}
|
|
|
|
|
|
|
|
// MarkForCollection starts tracking an allocation for Garbage Collection
|
|
|
|
func (a *AllocGarbageCollector) MarkForCollection(ar *AllocRunner) error {
|
|
|
|
if ar == nil {
|
|
|
|
return fmt.Errorf("nil allocation runner inserted for garbage collection")
|
|
|
|
}
|
|
|
|
if ar.Alloc() == nil {
|
2017-05-11 00:39:45 +00:00
|
|
|
a.destroyAllocRunner(ar, "alloc is nil")
|
2016-12-12 06:33:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
a.logger.Printf("[INFO] client: marking allocation %v for GC", ar.Alloc().ID)
|
|
|
|
return a.allocRunners.Push(ar)
|
|
|
|
}
|
2016-12-20 19:14:22 +00:00
|
|
|
|
|
|
|
// Remove removes an alloc runner without garbage collecting it
|
|
|
|
func (a *AllocGarbageCollector) Remove(ar *AllocRunner) {
|
|
|
|
if ar == nil || ar.Alloc() == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
alloc := ar.Alloc()
|
|
|
|
if _, err := a.allocRunners.Remove(alloc.ID); err == nil {
|
|
|
|
a.logger.Printf("[INFO] client: removed alloc runner %v from garbage collector", alloc.ID)
|
|
|
|
}
|
|
|
|
}
|
2017-03-11 00:27:00 +00:00
|
|
|
|
2017-05-11 00:39:45 +00:00
|
|
|
// numAllocs returns the total number of allocs tracked by the client as well
// as those marked for GC.
func (a *AllocGarbageCollector) numAllocs() int {
	// Sum of GC-queued runners and allocations the client reports.
	return a.allocRunners.Length() + a.allocCounter.NumAllocs()
}
|
|
|
|
|
2017-03-11 00:27:00 +00:00
|
|
|
// GCAlloc wraps an allocation runner and an index enabling it to be used within
// a PQ
type GCAlloc struct {
	// timeStamp is when the alloc was marked for collection; the heap
	// orders entries by this value (oldest first).
	timeStamp   time.Time
	allocRunner *AllocRunner
	// index is this entry's current position in the heap, kept up to
	// date by Swap/Push/Pop so heap.Remove can target it directly.
	index int
}
|
|
|
|
|
|
|
|
// GCAllocPQImpl implements heap.Interface over GCAlloc entries, ordered by
// their timestamps.
type GCAllocPQImpl []*GCAlloc
|
|
|
|
// Len implements heap.Interface, returning the number of queued entries.
func (pq GCAllocPQImpl) Len() int {
	return len(pq)
}
|
|
|
|
|
|
|
|
func (pq GCAllocPQImpl) Less(i, j int) bool {
|
|
|
|
return pq[i].timeStamp.Before(pq[j].timeStamp)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pq GCAllocPQImpl) Swap(i, j int) {
|
|
|
|
pq[i], pq[j] = pq[j], pq[i]
|
|
|
|
pq[i].index = i
|
|
|
|
pq[j].index = j
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pq *GCAllocPQImpl) Push(x interface{}) {
|
|
|
|
n := len(*pq)
|
|
|
|
item := x.(*GCAlloc)
|
|
|
|
item.index = n
|
|
|
|
*pq = append(*pq, item)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pq *GCAllocPQImpl) Pop() interface{} {
|
|
|
|
old := *pq
|
|
|
|
n := len(old)
|
|
|
|
item := old[n-1]
|
|
|
|
item.index = -1 // for safety
|
|
|
|
*pq = old[0 : n-1]
|
|
|
|
return item
|
|
|
|
}
|
|
|
|
|
|
|
|
// IndexedGCAllocPQ is an indexed PQ which maintains a list of allocation runner
// based on their termination time.
type IndexedGCAllocPQ struct {
	// index allows O(1) lookup and removal of entries by allocation ID.
	index map[string]*GCAlloc
	heap  GCAllocPQImpl

	// pqLock guards both index and heap.
	pqLock sync.Mutex
}
|
|
|
|
|
|
|
|
func NewIndexedGCAllocPQ() *IndexedGCAllocPQ {
|
|
|
|
return &IndexedGCAllocPQ{
|
|
|
|
index: make(map[string]*GCAlloc),
|
|
|
|
heap: make(GCAllocPQImpl, 0),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (i *IndexedGCAllocPQ) Push(ar *AllocRunner) error {
|
|
|
|
i.pqLock.Lock()
|
|
|
|
defer i.pqLock.Unlock()
|
|
|
|
|
|
|
|
alloc := ar.Alloc()
|
|
|
|
if _, ok := i.index[alloc.ID]; ok {
|
|
|
|
// No work to do
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
gcAlloc := &GCAlloc{
|
|
|
|
timeStamp: time.Now(),
|
|
|
|
allocRunner: ar,
|
|
|
|
}
|
|
|
|
i.index[alloc.ID] = gcAlloc
|
|
|
|
heap.Push(&i.heap, gcAlloc)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (i *IndexedGCAllocPQ) Pop() *GCAlloc {
|
|
|
|
i.pqLock.Lock()
|
|
|
|
defer i.pqLock.Unlock()
|
|
|
|
|
|
|
|
if len(i.heap) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
gcAlloc := heap.Pop(&i.heap).(*GCAlloc)
|
|
|
|
delete(i.index, gcAlloc.allocRunner.Alloc().ID)
|
|
|
|
return gcAlloc
|
|
|
|
}
|
|
|
|
|
|
|
|
func (i *IndexedGCAllocPQ) Remove(allocID string) (*GCAlloc, error) {
|
|
|
|
i.pqLock.Lock()
|
|
|
|
defer i.pqLock.Unlock()
|
|
|
|
|
|
|
|
if gcAlloc, ok := i.index[allocID]; ok {
|
|
|
|
heap.Remove(&i.heap, gcAlloc.index)
|
|
|
|
delete(i.index, allocID)
|
|
|
|
return gcAlloc, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, fmt.Errorf("alloc %q not present", allocID)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (i *IndexedGCAllocPQ) Length() int {
|
|
|
|
i.pqLock.Lock()
|
|
|
|
defer i.pqLock.Unlock()
|
|
|
|
|
|
|
|
return len(i.heap)
|
|
|
|
}
|