2015-08-05 00:19:05 +00:00
|
|
|
package structs
|
|
|
|
|
2015-09-07 22:08:50 +00:00
|
|
|
import (
|
|
|
|
crand "crypto/rand"
|
|
|
|
"fmt"
|
|
|
|
"math"
|
|
|
|
)
|
2015-08-13 18:54:59 +00:00
|
|
|
|
2015-08-05 00:19:05 +00:00
|
|
|
// RemoveAllocs is used to remove any allocs with the given IDs
|
|
|
|
// from the list of allocations
|
2015-08-25 23:52:56 +00:00
|
|
|
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
|
2015-08-05 00:19:05 +00:00
|
|
|
// Convert remove into a set
|
|
|
|
removeSet := make(map[string]struct{})
|
2015-08-25 23:52:56 +00:00
|
|
|
for _, remove := range remove {
|
|
|
|
removeSet[remove.ID] = struct{}{}
|
2015-08-05 00:19:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
n := len(alloc)
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
if _, ok := removeSet[alloc[i].ID]; ok {
|
|
|
|
alloc[i], alloc[n-1] = alloc[n-1], nil
|
|
|
|
i--
|
|
|
|
n--
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
alloc = alloc[:n]
|
|
|
|
return alloc
|
|
|
|
}
|
2015-08-05 00:28:19 +00:00
|
|
|
|
2015-08-23 01:27:51 +00:00
|
|
|
// FilterTerminalAllocs filters out all allocations in a terminal state
|
|
|
|
func FilterTerminalAllocs(allocs []*Allocation) []*Allocation {
|
|
|
|
n := len(allocs)
|
|
|
|
for i := 0; i < n; i++ {
|
|
|
|
if allocs[i].TerminalStatus() {
|
|
|
|
allocs[i], allocs[n-1] = allocs[n-1], nil
|
|
|
|
i--
|
|
|
|
n--
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return allocs[:n]
|
|
|
|
}
|
|
|
|
|
2015-09-13 21:56:51 +00:00
|
|
|
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it's already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions. Returns whether the allocations fit,
// the exhausted dimension when they do not, the summed resource
// utilization, and any error encountered while summing.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
	// Compute the utilization from zero
	used := new(Resources)

	// Add the reserved resources of the node
	if node.Reserved != nil {
		if err := used.Add(node.Reserved); err != nil {
			return false, "", nil, err
		}
	}

	// For each alloc, add the resources
	for _, alloc := range allocs {
		if alloc.Resources != nil {
			// Combined resources are present; use them directly.
			if err := used.Add(alloc.Resources); err != nil {
				return false, "", nil, err
			}
		} else if alloc.TaskResources != nil {

			// Adding the disk resource ask for the allocation to the used
			// resources
			if taskGroup := alloc.Job.LookupTaskGroup(alloc.TaskGroup); taskGroup != nil {
				used.DiskMB += taskGroup.LocalDisk.DiskMB
			}
			// Allocations within the plan have the combined resources stripped
			// to save space, so sum up the individual task resources.
			for _, taskResource := range alloc.TaskResources {
				if err := used.Add(taskResource); err != nil {
					return false, "", nil, err
				}
			}
		} else {
			// Neither form of resources is set; reject the alloc outright.
			return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
		}
	}

	// Check that the node resources are a super set of those
	// that are being allocated
	if superset, dimension := node.Resources.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing. A locally created index is
	// released via defer before returning, and port collisions are
	// detected here (a caller-supplied index is trusted to be
	// collision-free per the contract above).
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Allocations fit!
	return true, "", used, nil
}
|
|
|
|
|
|
|
|
// ScoreFit is used to score the fit based on the Google work published here:
|
|
|
|
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
|
|
|
|
// This is equivalent to their BestFit v3
|
|
|
|
func ScoreFit(node *Node, util *Resources) float64 {
|
|
|
|
// Determine the node availability
|
2015-09-23 18:14:32 +00:00
|
|
|
nodeCpu := float64(node.Resources.CPU)
|
2015-08-13 18:54:59 +00:00
|
|
|
if node.Reserved != nil {
|
2015-09-23 18:14:32 +00:00
|
|
|
nodeCpu -= float64(node.Reserved.CPU)
|
2015-08-13 18:54:59 +00:00
|
|
|
}
|
|
|
|
nodeMem := float64(node.Resources.MemoryMB)
|
|
|
|
if node.Reserved != nil {
|
|
|
|
nodeMem -= float64(node.Reserved.MemoryMB)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Compute the free percentage
|
2015-09-23 18:14:32 +00:00
|
|
|
freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
|
2015-08-13 18:54:59 +00:00
|
|
|
freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
|
|
|
|
|
|
|
|
// Total will be "maximized" the smaller the value is.
|
|
|
|
// At 100% utilization, the total is 2, while at 0% util it is 20.
|
|
|
|
total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
|
|
|
|
|
|
|
|
// Invert so that the "maximized" total represents a high-value
|
|
|
|
// score. Because the floor is 20, we simply use that as an anchor.
|
|
|
|
// This means at a perfect fit, we return 18 as the score.
|
|
|
|
score := 20.0 - total
|
|
|
|
|
|
|
|
// Bound the score, just in case
|
|
|
|
// If the score is over 18, that means we've overfit the node.
|
|
|
|
if score > 18.0 {
|
|
|
|
score = 18.0
|
|
|
|
} else if score < 0 {
|
|
|
|
score = 0
|
|
|
|
}
|
|
|
|
return score
|
2015-08-05 00:48:24 +00:00
|
|
|
}
|
2015-09-07 22:08:50 +00:00
|
|
|
|
|
|
|
// GenerateUUID is used to generate a random UUID. It panics if the
// system's random source cannot be read. Note: the output is 16 random
// bytes rendered in UUID form; no RFC 4122 version/variant bits are set.
func GenerateUUID() string {
	id := make([]byte, 16)
	_, err := crand.Read(id)
	if err != nil {
		panic(fmt.Errorf("failed to read random bytes: %v", err))
	}

	// Render as 8-4-4-4-12 hex groups.
	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
		id[:4], id[4:6], id[6:8], id[8:10], id[10:])
}
|
2016-02-11 01:54:43 +00:00
|
|
|
|
|
|
|
// Helpers for copying generic structures.

// CopyMapStringString returns a fresh copy of m, or nil when m is empty.
func CopyMapStringString(m map[string]string) map[string]string {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]string, len(m))
	for key, val := range m {
		out[key] = val
	}
	return out
}
|
|
|
|
|
|
|
|
// CopyMapStringInt returns a fresh copy of m, or nil when m is empty.
func CopyMapStringInt(m map[string]int) map[string]int {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]int, len(m))
	for key, val := range m {
		out[key] = val
	}
	return out
}
|
|
|
|
|
|
|
|
// CopyMapStringFloat64 returns a fresh copy of m, or nil when m is empty.
func CopyMapStringFloat64(m map[string]float64) map[string]float64 {
	if len(m) == 0 {
		return nil
	}

	out := make(map[string]float64, len(m))
	for key, val := range m {
		out[key] = val
	}
	return out
}
|
|
|
|
|
|
|
|
// CopySliceString returns a fresh copy of s, or nil when s is empty.
func CopySliceString(s []string) []string {
	if len(s) == 0 {
		return nil
	}

	out := make([]string, len(s))
	copy(out, s)
	return out
}
|
2016-02-11 17:08:20 +00:00
|
|
|
|
2016-08-09 22:00:50 +00:00
|
|
|
// CopySliceInt returns a fresh copy of s, or nil when s is empty.
func CopySliceInt(s []int) []int {
	if len(s) == 0 {
		return nil
	}

	out := make([]int, len(s))
	copy(out, s)
	return out
}
|
|
|
|
|
2016-02-11 17:08:20 +00:00
|
|
|
func CopySliceConstraints(s []*Constraint) []*Constraint {
|
|
|
|
l := len(s)
|
|
|
|
if l == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
c := make([]*Constraint, l)
|
|
|
|
for i, v := range s {
|
|
|
|
c[i] = v.Copy()
|
|
|
|
}
|
|
|
|
return c
|
|
|
|
}
|
2016-08-17 00:50:14 +00:00
|
|
|
|
|
|
|
// SliceStringIsSubset returns whether the smaller set of strings is a subset of
// the larger. If the smaller slice is not a subset, the offending elements are
// returned.
func SliceStringIsSubset(larger, smaller []string) (bool, []string) {
	// Index the larger slice for O(1) membership checks.
	have := make(map[string]struct{}, len(larger))
	for _, item := range larger {
		have[item] = struct{}{}
	}

	// Collect everything in smaller that larger does not contain.
	var missing []string
	for _, item := range smaller {
		if _, ok := have[item]; !ok {
			missing = append(missing, item)
		}
	}

	return len(missing) == 0, missing
}
|
|
|
|
|
|
|
|
// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
|
|
|
|
// the set of required policies
|
2016-08-18 17:50:47 +00:00
|
|
|
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
|
2016-08-17 00:50:14 +00:00
|
|
|
set := make(map[string]struct{})
|
|
|
|
|
|
|
|
for _, tgp := range policies {
|
|
|
|
for _, tp := range tgp {
|
2016-08-18 17:50:47 +00:00
|
|
|
for _, p := range tp.Policies {
|
2016-08-17 00:50:14 +00:00
|
|
|
set[p] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
flattened := make([]string, 0, len(set))
|
|
|
|
for p := range set {
|
|
|
|
flattened = append(flattened, p)
|
|
|
|
}
|
|
|
|
return flattened
|
|
|
|
}
|