package scheduler

import (
	"fmt"
	"sort"
	"strings"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

// placementResult is an allocation that must be placed. It potentially has a
// previous allocation attached to it that should be stopped only if the
// paired placement is complete. This gives an atomic place/stop behavior to
// prevent an impossible resource ask as part of a rolling update to wipe the
// job out.
type placementResult interface {
	// TaskGroup returns the task group the placement is for
	TaskGroup() *structs.TaskGroup

	// Name returns the name of the desired allocation
	Name() string

	// Canary returns whether the placement should be a canary
	Canary() bool

	// PreviousAllocation returns the previous allocation
	PreviousAllocation() *structs.Allocation

	// IsRescheduling returns whether the placement was rescheduling a failed allocation
	IsRescheduling() bool

	// StopPreviousAlloc returns whether the previous allocation should be
	// stopped and if so the status description.
	StopPreviousAlloc() (bool, string)
}
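
// Both allocPlaceResult and allocDestructiveResult below implement this
// interface; the destructive variant is the only one that asks for its
// previous allocation to be stopped.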

// allocStopResult contains the information required to stop a single allocation
type allocStopResult struct {
	alloc             *structs.Allocation
	clientStatus      string
	statusDescription string
}

// allocPlaceResult contains the information required to place a single
// allocation
type allocPlaceResult struct {
	name          string
	canary        bool
	taskGroup     *structs.TaskGroup
	previousAlloc *structs.Allocation
	reschedule    bool
}

func (a allocPlaceResult) TaskGroup() *structs.TaskGroup           { return a.taskGroup }
func (a allocPlaceResult) Name() string                            { return a.name }
func (a allocPlaceResult) Canary() bool                            { return a.canary }
func (a allocPlaceResult) PreviousAllocation() *structs.Allocation { return a.previousAlloc }
func (a allocPlaceResult) IsRescheduling() bool                    { return a.reschedule }
func (a allocPlaceResult) StopPreviousAlloc() (bool, string)       { return false, "" }

// allocDestructiveResult contains the information required to do a destructive
// update. Destructive changes should be applied atomically, as in the old alloc
// is only stopped if the new one can be placed.
type allocDestructiveResult struct {
	placeName             string
	placeTaskGroup        *structs.TaskGroup
	stopAlloc             *structs.Allocation
	stopStatusDescription string
}

func (a allocDestructiveResult) TaskGroup() *structs.TaskGroup           { return a.placeTaskGroup }
func (a allocDestructiveResult) Name() string                            { return a.placeName }
func (a allocDestructiveResult) Canary() bool                            { return false }
func (a allocDestructiveResult) PreviousAllocation() *structs.Allocation { return a.stopAlloc }
func (a allocDestructiveResult) IsRescheduling() bool                    { return false }
func (a allocDestructiveResult) StopPreviousAlloc() (bool, string) {
	return true, a.stopStatusDescription
}

// allocMatrix is a mapping of task groups to their allocation set.
type allocMatrix map[string]allocSet

// newAllocMatrix takes a job and the existing allocations for the job and
// creates an allocMatrix
func newAllocMatrix(job *structs.Job, allocs []*structs.Allocation) allocMatrix {
	m := allocMatrix(make(map[string]allocSet))
	for _, a := range allocs {
		s, ok := m[a.TaskGroup]
		if !ok {
			s = make(map[string]*structs.Allocation)
			m[a.TaskGroup] = s
		}
		s[a.ID] = a
	}

	if job != nil {
		for _, tg := range job.TaskGroups {
			if _, ok := m[tg.Name]; !ok {
				m[tg.Name] = make(map[string]*structs.Allocation)
			}
		}
	}
	return m
}
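
// For example, a job with task groups "web" and "cache" and two existing
// "web" allocations produces a matrix shaped like (IDs elided):
//
//	allocMatrix{
//		"web":   {<id1>: alloc1, <id2>: alloc2},
//		"cache": {},
//	}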

// allocSet is a set of allocations with a series of helper functions defined
// that help reconcile state.
type allocSet map[string]*structs.Allocation

// GoString provides a human-readable view of the set
func (a allocSet) GoString() string {
	if len(a) == 0 {
		return "[]"
	}

	start := fmt.Sprintf("len(%d) [\n", len(a))
	var s []string
	for k, v := range a {
		s = append(s, fmt.Sprintf("%q: %v", k, v.Name))
	}
	return start + strings.Join(s, "\n") + "]"
}

// nameSet returns the set of allocation names
func (a allocSet) nameSet() map[string]struct{} {
	names := make(map[string]struct{}, len(a))
	for _, alloc := range a {
		names[alloc.Name] = struct{}{}
	}
	return names
}

// nameOrder returns the allocations sorted by the index of their name
func (a allocSet) nameOrder() []*structs.Allocation {
	allocs := make([]*structs.Allocation, 0, len(a))
	for _, alloc := range a {
		allocs = append(allocs, alloc)
	}
	sort.Slice(allocs, func(i, j int) bool {
		return allocs[i].Index() < allocs[j].Index()
	})
	return allocs
}
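
// Note: allocation names follow the "<job>.<group>[<index>]" convention used
// by structs.AllocName below, and Index() recovers the bracketed index, so
// the sort yields web.web[0], web.web[1], and so on.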

// difference returns a new allocSet that has all the existing items except
// those contained within the other allocation sets
func (a allocSet) difference(others ...allocSet) allocSet {
	diff := make(map[string]*structs.Allocation)

OUTER:
	for k, v := range a {
		for _, other := range others {
			if _, ok := other[k]; ok {
				continue OUTER
			}
		}
		diff[k] = v
	}
	return diff
}
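
// A minimal sketch: with a = {id1: a1, id2: a2, id3: a3} and stop = {id2: a2},
// a.difference(stop) returns {id1: a1, id3: a3}; membership is keyed by
// allocation ID.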

// union returns a new allocSet that has the union of the given allocSets.
// Conflicts prefer the last passed allocSet containing the value
func (a allocSet) union(others ...allocSet) allocSet {
	union := make(map[string]*structs.Allocation, len(a))
	order := []allocSet{a}
	order = append(order, others...)

	for _, set := range order {
		for k, v := range set {
			union[k] = v
		}
	}

	return union
}
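
// A minimal sketch: if a and the last set in others both carry key id1, the
// value from the last set wins, since later sets overwrite earlier entries.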

// fromKeys returns an alloc set matching the passed keys
func (a allocSet) fromKeys(keys ...[]string) allocSet {
	from := make(map[string]*structs.Allocation)
	for _, set := range keys {
		for _, k := range set {
			if alloc, ok := a[k]; ok {
				from[k] = alloc
			}
		}
	}
	return from
}

// filterByTainted takes a set of tainted nodes and filters the allocation set
// into three groups:
// 1. Those that exist on untainted nodes
// 2. Those that exist on nodes that are draining
// 3. Those that exist on lost nodes
func (a allocSet) filterByTainted(nodes map[string]*structs.Node) (untainted, migrate, lost allocSet) {
	untainted = make(map[string]*structs.Allocation)
	migrate = make(map[string]*structs.Allocation)
	lost = make(map[string]*structs.Allocation)
	for _, alloc := range a {
		// Terminal allocs are always untainted as they should never be migrated
		if alloc.TerminalStatus() {
			untainted[alloc.ID] = alloc
			continue
		}

		// Non-terminal allocs that should migrate should always migrate
		if alloc.DesiredTransition.ShouldMigrate() {
			migrate[alloc.ID] = alloc
			continue
		}

		n, ok := nodes[alloc.NodeID]
		if !ok {
			// Node is untainted so alloc is untainted
			untainted[alloc.ID] = alloc
			continue
		}

		// Allocs on GC'd (nil) or lost nodes are Lost
		if n == nil || n.TerminalStatus() {
			lost[alloc.ID] = alloc
			continue
		}

		// All other allocs are untainted
		untainted[alloc.ID] = alloc
	}
	return
}
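
// A usage sketch, assuming taintedNodes maps node IDs to draining or down
// nodes, with nil entries for nodes that were garbage collected:
//
//	untainted, migrate, lost := allocs.filterByTainted(taintedNodes)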

// filterByRescheduleable filters the allocation set to return the set of allocations that are either
// untainted or a set of allocations that must be rescheduled now. Allocations that can be rescheduled
// at a future time are also returned so that we can create follow up evaluations for them. Allocs are
// skipped or considered untainted according to the logic defined in the shouldFilter method.
func (a allocSet) filterByRescheduleable(isBatch bool, now time.Time, evalID string, deployment *structs.Deployment) (untainted, rescheduleNow allocSet, rescheduleLater []*delayedRescheduleInfo) {
	untainted = make(map[string]*structs.Allocation)
	rescheduleNow = make(map[string]*structs.Allocation)

	for _, alloc := range a {
		var eligibleNow, eligibleLater bool
		var rescheduleTime time.Time

		// Ignore allocs that have already been rescheduled
		if alloc.NextAllocation != "" {
			continue
		}

		isUntainted, ignore := shouldFilter(alloc, isBatch)
		if isUntainted {
			untainted[alloc.ID] = alloc
		}
		if isUntainted || ignore {
			continue
		}

		// Only failed allocs with desired state run get to this point.
		// If the failed alloc is not eligible for rescheduling now, we
		// add it to the untainted set.
		eligibleNow, eligibleLater, rescheduleTime = updateByReschedulable(alloc, now, evalID, deployment)
		if !eligibleNow {
			untainted[alloc.ID] = alloc
			if eligibleLater {
				rescheduleLater = append(rescheduleLater, &delayedRescheduleInfo{alloc.ID, alloc, rescheduleTime})
			}
		} else {
			rescheduleNow[alloc.ID] = alloc
		}
	}
	return
}

// shouldFilter returns whether the alloc should be ignored or considered untainted.
// Ignored allocs are filtered out.
// Untainted allocs count against the desired total.
//
// Filtering logic for batch jobs:
// If complete, and ran successfully - untainted
// If desired state is stop - ignore
//
// Filtering logic for service jobs:
// If desired state is stop/evict - ignore
// If client status is complete/lost - ignore
func shouldFilter(alloc *structs.Allocation, isBatch bool) (untainted, ignore bool) {
	// Allocs from batch jobs should be filtered when the desired status
	// is terminal and the client did not finish or when the client
	// status is failed so that they will be replaced. If they are
	// complete but not failed, they shouldn't be replaced.
	if isBatch {
		switch alloc.DesiredStatus {
		case structs.AllocDesiredStatusStop, structs.AllocDesiredStatusEvict:
			if alloc.RanSuccessfully() {
				return true, false
			}
			return false, true
		default:
		}

		switch alloc.ClientStatus {
		case structs.AllocClientStatusFailed:
		default:
			return true, false
		}
		return false, false
	}

	// Handle service jobs
	switch alloc.DesiredStatus {
	case structs.AllocDesiredStatusStop, structs.AllocDesiredStatusEvict:
		return false, true
	default:
	}

	switch alloc.ClientStatus {
	case structs.AllocClientStatusComplete, structs.AllocClientStatusLost:
		return false, true
	default:
	}
	return false, false
}

// updateByReschedulable is a helper that encapsulates logic for whether a failed allocation
// should be rescheduled now, later or left in the untainted set
func updateByReschedulable(alloc *structs.Allocation, now time.Time, evalID string, d *structs.Deployment) (rescheduleNow, rescheduleLater bool, rescheduleTime time.Time) {
	// If the allocation is part of an ongoing active deployment, we only allow it to reschedule
	// if it has been marked eligible
	if d != nil && alloc.DeploymentID == d.ID && d.Active() && !alloc.DesiredTransition.ShouldReschedule() {
		return
	}

	// Check if the allocation has been marked for forced rescheduling
	if alloc.DesiredTransition.ShouldForceReschedule() {
		rescheduleNow = true
	}

	// Reschedule if the eval ID matches the alloc's follow-up eval ID or if it's close to its reschedule time
	rescheduleTime, eligible := alloc.NextRescheduleTime()
	if eligible && (alloc.FollowupEvalID == evalID || rescheduleTime.Sub(now) <= rescheduleWindowSize) {
		rescheduleNow = true
		return
	}
	if eligible && alloc.FollowupEvalID == "" {
		rescheduleLater = true
	}
	return
}
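
// Note: rescheduleWindowSize is a package-level constant defined elsewhere in
// this package; allocs whose reschedule time falls within that window of now
// are treated as due immediately rather than deferred to a follow-up eval.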

// filterByTerminal filters out terminal allocs
func filterByTerminal(untainted allocSet) (nonTerminal allocSet) {
	nonTerminal = make(map[string]*structs.Allocation)
	for id, alloc := range untainted {
		if !alloc.TerminalStatus() {
			nonTerminal[id] = alloc
		}
	}
	return
}

// filterByDeployment filters allocations into two sets, those that match the
// given deployment ID and those that don't
func (a allocSet) filterByDeployment(id string) (match, nonmatch allocSet) {
	match = make(map[string]*structs.Allocation)
	nonmatch = make(map[string]*structs.Allocation)
	for _, alloc := range a {
		if alloc.DeploymentID == id {
			match[alloc.ID] = alloc
		} else {
			nonmatch[alloc.ID] = alloc
		}
	}
	return
}

// allocNameIndex is used to select allocation names for placement or removal
// given an existing set of placed allocations.
type allocNameIndex struct {
	job, taskGroup string
	count          int
	b              structs.Bitmap
}

// newAllocNameIndex returns an allocNameIndex for use in selecting names of
// allocations to create or stop. It takes the job and task group name, desired
// count and any existing allocations as input.
func newAllocNameIndex(job, taskGroup string, count int, in allocSet) *allocNameIndex {
	return &allocNameIndex{
		count:     count,
		b:         bitmapFrom(in, uint(count)),
		job:       job,
		taskGroup: taskGroup,
	}
}

// bitmapFrom creates a bitmap from the given allocation set and a minimum
// size. The size of the bitmap is the larger of the passed minimum and the
// maximum alloc index of the passed input (byte aligned).
func bitmapFrom(input allocSet, minSize uint) structs.Bitmap {
	var max uint
	for _, a := range input {
		if num := a.Index(); num > max {
			max = num
		}
	}

	if l := uint(len(input)); minSize < l {
		minSize = l
	}

	if max < minSize {
		max = minSize
	} else if max%8 == 0 {
		// This may be possible if the job was scaled down. We want to make sure
		// that the max index is not byte-aligned, otherwise we will overflow
		// the bitmap.
		max++
	}

	if max == 0 {
		max = 8
	}

	// Byte-align the count
	if remainder := max % 8; remainder != 0 {
		max = max + 8 - remainder
	}

	bitmap, err := structs.NewBitmap(max)
	if err != nil {
		panic(err)
	}

	for _, a := range input {
		bitmap.Set(a.Index())
	}

	return bitmap
}
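
// A worked example: with used indexes {0, 1, 10} and minSize 3, max starts at
// 10, which is not byte-aligned, so it rounds up to 16; the result is a
// 16-slot bitmap with bits 0, 1 and 10 set.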

// Highest removes and returns the highest n used names. The returned set
// can be less than n if there aren't n names set in the index
func (a *allocNameIndex) Highest(n uint) map[string]struct{} {
	h := make(map[string]struct{}, n)
	for i := a.b.Size(); i > uint(0) && uint(len(h)) < n; i-- {
		// Use this to avoid wrapping around b/c of the unsigned int
		idx := i - 1
		if a.b.Check(idx) {
			a.b.Unset(idx)
			h[structs.AllocName(a.job, a.taskGroup, idx)] = struct{}{}
		}
	}

	return h
}
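
// For example, with bits {0, 1, 3} set, Highest(2) returns the names for
// indexes 3 and 1 and clears those bits, leaving only index 0 marked as used.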

// Set sets the indexes from the passed alloc set as used
func (a *allocNameIndex) Set(set allocSet) {
	for _, alloc := range set {
		a.b.Set(alloc.Index())
	}
}

// Unset unsets all indexes of the passed alloc set as being used
func (a *allocNameIndex) Unset(as allocSet) {
	for _, alloc := range as {
		a.b.Unset(alloc.Index())
	}
}

// UnsetIndex unsets the index as having its name used
func (a *allocNameIndex) UnsetIndex(idx uint) {
	a.b.Unset(idx)
}

// NextCanaries returns the next n names for use as canaries and sets them as
// used. The existing canaries and destructive updates are also passed in.
func (a *allocNameIndex) NextCanaries(n uint, existing, destructive allocSet) []string {
	next := make([]string, 0, n)

	// Create a name index
	existingNames := existing.nameSet()

	// First select indexes from the allocations that are undergoing destructive
	// updates. This way we avoid duplicate names as they will get replaced.
	dmap := bitmapFrom(destructive, uint(a.count))
	remainder := n
	for _, idx := range dmap.IndexesInRange(true, uint(0), uint(a.count)-1) {
		name := structs.AllocName(a.job, a.taskGroup, uint(idx))
		if _, used := existingNames[name]; !used {
			next = append(next, name)
			a.b.Set(uint(idx))

			// If we have enough, return
			remainder = n - uint(len(next))
			if remainder == 0 {
				return next
			}
		}
	}

	// Get the set of unset names that can be used
	for _, idx := range a.b.IndexesInRange(false, uint(0), uint(a.count)-1) {
		name := structs.AllocName(a.job, a.taskGroup, uint(idx))
		if _, used := existingNames[name]; !used {
			next = append(next, name)
			a.b.Set(uint(idx))

			// If we have enough, return
			remainder = n - uint(len(next))
			if remainder == 0 {
				return next
			}
		}
	}

	// We have exhausted the preferred and free set. Pick starting from count to
	// count+remainder, to avoid overlapping where possible. An example is the
	// desired count is 3 and we want 5 canaries. The first 3 canaries can use
	// indexes [0, 1, 2] but after that we prefer picking indexes [3, 4] so that
	// we do not overlap. Once the canaries are promoted, these would be the
	// allocations that would be shut down as well.
	for i := uint(a.count); i < uint(a.count)+remainder; i++ {
		name := structs.AllocName(a.job, a.taskGroup, i)
		next = append(next, name)
	}

	return next
}
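
// A worked example on a fresh index with count 3: NextCanaries(5, nil, nil)
// picks the free indexes 0, 1 and 2 first, then spills past the count to
// indexes 3 and 4, matching the overlap-avoidance comment above.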

// Next returns the next n names for use as new placements and sets them as
// used.
func (a *allocNameIndex) Next(n uint) []string {
	next := make([]string, 0, n)

	// Get the set of unset names that can be used
	remainder := n
	for _, idx := range a.b.IndexesInRange(false, uint(0), uint(a.count)-1) {
		next = append(next, structs.AllocName(a.job, a.taskGroup, uint(idx)))
		a.b.Set(uint(idx))

		// If we have enough, return
		remainder = n - uint(len(next))
		if remainder == 0 {
			return next
		}
	}

	// We have exhausted the free set, now just pick overlapping indexes
	var i uint
	for i = 0; i < remainder; i++ {
		next = append(next, structs.AllocName(a.job, a.taskGroup, i))
		a.b.Set(i)
	}

	return next
}
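
// A worked example on a fresh index with count 2: Next(3) returns names for
// indexes 0 and 1, then wraps and reuses index 0 for the third placement.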