open-nomad/nomad/structs/funcs.go

package structs
import (
"encoding/binary"
"fmt"
"math"
"sort"
"strings"
"golang.org/x/crypto/blake2b"
multierror "github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/hashicorp/nomad/acl"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings and
// merges them into a returnable string. Any of the warnings may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
var warningMsg multierror.Error
for _, warn := range warnings {
if warn != nil {
multierror.Append(&warningMsg, warn)
}
}
if len(warningMsg.Errors) == 0 {
return ""
}
// Set the formatter
warningMsg.ErrorFormat = warningsFormatter
return warningMsg.Error()
}
// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
"%d warning(s):\n\n%s",
len(es), strings.Join(points, "\n"))
}
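
// exampleMergeWarnings is an illustrative sketch (the function name and the
// warning messages are hypothetical) showing the string produced by
// MergeMultierrorWarnings for two non-nil warnings.
func exampleMergeWarnings() string {
	warnA := fmt.Errorf("task group %q has no tasks", "cache")
	warnB := fmt.Errorf("task %q has no driver", "redis")

	// Nil warnings are skipped, so the result below is:
	//
	//	2 warning(s):
	//
	//	* task group "cache" has no tasks
	//	* task "redis" has no driver
	return MergeMultierrorWarnings(warnA, nil, warnB)
}
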
// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
// Convert remove into a set
removeSet := make(map[string]struct{})
for _, remove := range remove {
removeSet[remove.ID] = struct{}{}
}
n := len(alloc)
for i := 0; i < n; i++ {
if _, ok := removeSet[alloc[i].ID]; ok {
alloc[i], alloc[n-1] = alloc[n-1], nil
i--
n--
}
}
alloc = alloc[:n]
return alloc
}
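
// exampleRemoveAllocs is a minimal sketch (the allocation IDs are hypothetical)
// showing that RemoveAllocs filters by ID; because it swap-removes in place,
// the order of the surviving allocations is not guaranteed.
func exampleRemoveAllocs() []*Allocation {
	a := &Allocation{ID: "a"}
	b := &Allocation{ID: "b"}
	c := &Allocation{ID: "c"}

	// Returns a slice containing only a and c.
	return RemoveAllocs([]*Allocation{a, b, c}, []*Allocation{b})
}
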
// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocation for each allocation name.
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
terminalAllocsByName := make(map[string]*Allocation)
n := len(allocs)
for i := 0; i < n; i++ {
if allocs[i].TerminalStatus() {
// Add the allocation to the terminal allocs map if it isn't already
// present, or if it has a higher create index than the one that is
// currently stored.
alloc, ok := terminalAllocsByName[allocs[i].Name]
if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
terminalAllocsByName[allocs[i].Name] = allocs[i]
}
// Remove the allocation
allocs[i], allocs[n-1] = allocs[n-1], nil
i--
n--
}
}
return allocs[:n], terminalAllocsByName
}
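
// exampleFilterTerminalAllocs is a minimal sketch (the names, IDs, and indexes
// are hypothetical) showing that terminal allocations are removed from the
// slice and that, per allocation name, only the terminal allocation with the
// highest CreateIndex is reported.
func exampleFilterTerminalAllocs() ([]*Allocation, map[string]*Allocation) {
	running := &Allocation{ID: "r1", Name: "example.cache[0]", DesiredStatus: AllocDesiredStatusRun}
	oldStop := &Allocation{ID: "s1", Name: "example.cache[1]", CreateIndex: 10, DesiredStatus: AllocDesiredStatusStop}
	newStop := &Allocation{ID: "s2", Name: "example.cache[1]", CreateIndex: 20, DesiredStatus: AllocDesiredStatusStop}

	// The filtered slice keeps only the running allocation; the map holds
	// "example.cache[1]" -> newStop because it has the higher CreateIndex.
	return FilterTerminalAllocs([]*Allocation{running, oldStop, newStop})
}
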
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
// Compute the utilization from zero
used := new(Resources)
// Add the reserved resources of the node
if node.Reserved != nil {
if err := used.Add(node.Reserved); err != nil {
return false, "", nil, err
}
}
// For each alloc, add the resources
for _, alloc := range allocs {
if alloc.Resources != nil {
if err := used.Add(alloc.Resources); err != nil {
return false, "", nil, err
}
} else if alloc.TaskResources != nil {
// Add the shared resources asked for by the allocation to the used
// resources.
if err := used.Add(alloc.SharedResources); err != nil {
return false, "", nil, err
}
// Allocations within the plan have the combined resources stripped
// to save space, so sum up the individual task resources.
for _, taskResource := range alloc.TaskResources {
if err := used.Add(taskResource); err != nil {
return false, "", nil, err
}
}
} else {
return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
}
}
// Check that the node resources are a superset of those
// that are being allocated
if superset, dimension := node.Resources.Superset(used); !superset {
return false, dimension, used, nil
}
// Create the network index if missing
if netIdx == nil {
netIdx = NewNetworkIndex()
defer netIdx.Release()
if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
return false, "reserved port collision", used, nil
}
}
// Check if the network is overcommitted
if netIdx.Overcommitted() {
return false, "bandwidth exceeded", used, nil
}
// Allocations fit!
return true, "", used, nil
}
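
// exampleAllocsFit is a minimal sketch (the error wording is hypothetical)
// showing how a caller interprets the results of AllocsFit. Passing a nil
// NetworkIndex makes AllocsFit build one itself and check for reserved port
// collisions and bandwidth overcommit.
func exampleAllocsFit(node *Node, allocs []*Allocation) (*Resources, error) {
	fit, dimension, used, err := AllocsFit(node, allocs, nil)
	if err != nil {
		return nil, err
	}
	if !fit {
		// dimension names the failing check, for example the exhausted
		// resource, "reserved port collision", or "bandwidth exceeded".
		return nil, fmt.Errorf("allocations do not fit node %q: %s", node.ID, dimension)
	}
	// used is the total of the node's reserved resources plus all allocations.
	return used, nil
}
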
// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
// Determine the node availability
nodeCpu := float64(node.Resources.CPU)
if node.Reserved != nil {
nodeCpu -= float64(node.Reserved.CPU)
}
nodeMem := float64(node.Resources.MemoryMB)
if node.Reserved != nil {
nodeMem -= float64(node.Reserved.MemoryMB)
}
// Compute the free percentage
freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
// A smaller total indicates a tighter (better) fit.
// At 100% utilization the total is 2, while at 0% utilization it is 20.
total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
// Invert so that a tighter fit yields a higher score. Because the
// worst-case total is 20 (an idle node), we use 20 as the anchor,
// which means a perfect fit returns a score of 18.
score := 20.0 - total
// Bound the score, just in case
// If the score is over 18, that means we've overfit the node.
if score > 18.0 {
score = 18.0
} else if score < 0 {
score = 0
}
return score
}
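
// exampleScoreFit is a minimal sketch (the node and utilization figures are
// hypothetical) walking through the BestFit v3 arithmetic above.
func exampleScoreFit() float64 {
	node := &Node{
		Resources: &Resources{CPU: 4000, MemoryMB: 8192},
	}
	util := &Resources{CPU: 2000, MemoryMB: 4096}

	// Both dimensions are 50% free, so:
	//	total = 10^0.5 + 10^0.5 ≈ 6.32
	//	score = 20 - 6.32 ≈ 13.68
	// A perfectly packed node scores 18; an idle node scores 0.
	return ScoreFit(node, util)
}
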
func CopySliceConstraints(s []*Constraint) []*Constraint {
l := len(s)
if l == 0 {
return nil
}
c := make([]*Constraint, l)
for i, v := range s {
c[i] = v.Copy()
}
return c
}
// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
set := make(map[string]struct{})
for _, tgp := range policies {
for _, tp := range tgp {
for _, p := range tp.Policies {
set[p] = struct{}{}
}
}
}
flattened := make([]string, 0, len(set))
for p := range set {
flattened = append(flattened, p)
}
return flattened
}
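
// exampleVaultPoliciesSet is a minimal sketch (the group, task, and policy
// names are hypothetical) showing how per-task Vault policies are flattened
// into a deduplicated set.
func exampleVaultPoliciesSet() []string {
	policies := map[string]map[string]*Vault{
		"web": {
			"frontend": {Policies: []string{"kv-read"}},
			"backend":  {Policies: []string{"kv-read", "db-creds"}},
		},
	}

	// Returns "kv-read" and "db-creds" exactly once each, in no particular
	// order.
	return VaultPoliciesSet(policies)
}
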
// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
if job != nil {
for _, alloc := range allocs {
if alloc.Job == nil && !alloc.TerminalStatus() {
alloc.Job = job
}
}
}
}
// AllocName returns the name of an allocation in the form "job.group[idx]",
// for example "example.cache[0]".
func AllocName(job, group string, idx uint) string {
return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}
// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
cacheKeyHash, err := blake2b.New256(nil)
if err != nil {
panic(err)
}
for _, policy := range policies {
cacheKeyHash.Write([]byte(policy.Name))
binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
}
cacheKey := string(cacheKeyHash.Sum(nil))
return cacheKey
}
// CompileACLObject compiles a set of ACL policies into an ACL object, using
// the given cache to avoid re-parsing and re-compiling identical policy sets.
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
// Sort the policies to ensure consistent ordering
sort.Slice(policies, func(i, j int) bool {
return policies[i].Name < policies[j].Name
})
// Determine the cache key
cacheKey := ACLPolicyListHash(policies)
aclRaw, ok := cache.Get(cacheKey)
if ok {
return aclRaw.(*acl.ACL), nil
}
// Parse the policies
parsed := make([]*acl.Policy, 0, len(policies))
for _, policy := range policies {
p, err := acl.Parse(policy.Rules)
if err != nil {
return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
}
parsed = append(parsed, p)
}
// Create the ACL object
aclObj, err := acl.NewACL(false, parsed)
if err != nil {
return nil, fmt.Errorf("failed to construct ACL: %v", err)
}
// Update the cache
cache.Add(cacheKey, aclObj)
return aclObj, nil
}
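
// exampleCompileACL is a minimal sketch (the cache size is an arbitrary,
// hypothetical choice) showing the intended call pattern: the caller keeps a
// single TwoQueueCache alive so repeated resolutions of the same policy set
// reuse the parsed ACL object.
func exampleCompileACL(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	if cache == nil {
		var err error
		cache, err = lru.New2Q(512)
		if err != nil {
			return nil, err
		}
	}
	return CompileACLObject(cache, policies)
}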