fixing dependencies

Chris Hoffman 2017-10-02 13:46:42 -04:00
parent 4fbf9253a2
commit 484401689b
15 changed files with 44 additions and 7905 deletions


@ -9,7 +9,6 @@ import (
"github.com/gorhill/cronexpr"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
@ -330,6 +329,20 @@ type UpdateStrategy struct {
Canary *int `mapstructure:"canary"`
}
+ // DefaultUpdateStrategy provides a baseline that can be used to upgrade
+ // jobs with the old policy or for populating field defaults.
+ func DefaultUpdateStrategy() *UpdateStrategy {
+ return &UpdateStrategy{
+ Stagger: helper.TimeToPtr(30 * time.Second),
+ MaxParallel: helper.IntToPtr(1),
+ HealthCheck: helper.StringToPtr("checks"),
+ MinHealthyTime: helper.TimeToPtr(10 * time.Second),
+ HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
+ AutoRevert: helper.BoolToPtr(false),
+ Canary: helper.IntToPtr(0),
+ }
+ }
func (u *UpdateStrategy) Copy() *UpdateStrategy {
if u == nil {
return nil
@ -403,34 +416,34 @@ func (u *UpdateStrategy) Merge(o *UpdateStrategy) {
}
func (u *UpdateStrategy) Canonicalize() {
- d := structs.DefaultUpdateStrategy
+ d := DefaultUpdateStrategy()
if u.MaxParallel == nil {
- u.MaxParallel = helper.IntToPtr(d.MaxParallel)
+ u.MaxParallel = d.MaxParallel
}
if u.Stagger == nil {
- u.Stagger = helper.TimeToPtr(d.Stagger)
+ u.Stagger = d.Stagger
}
if u.HealthCheck == nil {
- u.HealthCheck = helper.StringToPtr(d.HealthCheck)
+ u.HealthCheck = d.HealthCheck
}
if u.HealthyDeadline == nil {
- u.HealthyDeadline = helper.TimeToPtr(d.HealthyDeadline)
+ u.HealthyDeadline = d.HealthyDeadline
}
if u.MinHealthyTime == nil {
- u.MinHealthyTime = helper.TimeToPtr(d.MinHealthyTime)
+ u.MinHealthyTime = d.MinHealthyTime
}
if u.AutoRevert == nil {
- u.AutoRevert = helper.BoolToPtr(d.AutoRevert)
+ u.AutoRevert = d.AutoRevert
}
if u.Canary == nil {
- u.Canary = helper.IntToPtr(d.Canary)
+ u.Canary = d.Canary
}
}
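The net effect of this hunk: Canonicalize now fills nil fields by sharing pointers from a freshly allocated DefaultUpdateStrategy() rather than re-boxing values out of structs.DefaultUpdateStrategy, which is what lets the api package drop its nomad/structs import. A minimal sketch of the resulting behavior, assuming the updated api and helper packages are importable:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// Only MaxParallel is set explicitly; Canonicalize fills in the rest.
	u := &api.UpdateStrategy{MaxParallel: helper.IntToPtr(2)}
	u.Canonicalize()
	fmt.Println(*u.MaxParallel) // 2: explicit values are preserved
	fmt.Println(*u.Stagger)     // 30s, taken from DefaultUpdateStrategy()
	fmt.Println(*u.HealthCheck) // checks
}

Sharing the default's pointers is safe here because DefaultUpdateStrategy() allocates a new struct on every call, so two canonicalized strategies never alias each other's defaults.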


@ -4,27 +4,27 @@ import (
"time"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/helper/uuid"
)
func MockJob() *Job {
job := &Job{
Region: helper.StringToPtr("global"),
- ID: helper.StringToPtr(structs.GenerateUUID()),
+ ID: helper.StringToPtr(uuid.Generate()),
Name: helper.StringToPtr("my-job"),
Type: helper.StringToPtr("service"),
Priority: helper.IntToPtr(50),
AllAtOnce: helper.BoolToPtr(false),
Datacenters: []string{"dc1"},
Constraints: []*Constraint{
- &Constraint{
+ {
LTarget: "${attr.kernel.name}",
RTarget: "linux",
Operand: "=",
},
},
TaskGroups: []*TaskGroup{
- &TaskGroup{
+ {
Name: helper.StringToPtr("web"),
Count: helper.IntToPtr(10),
EphemeralDisk: &EphemeralDisk{
@ -37,7 +37,7 @@ func MockJob() *Job {
Mode: helper.StringToPtr("delay"),
},
Tasks: []*Task{
- &Task{
+ {
Name: "web",
Driver: "exec",
Config: map[string]interface{}{
@ -72,7 +72,7 @@ func MockJob() *Job {
CPU: helper.IntToPtr(500),
MemoryMB: helper.IntToPtr(256),
Networks: []*NetworkResource{
- &NetworkResource{
+ {
MBits: helper.IntToPtr(50),
DynamicPorts: []Port{{Label: "http"}, {Label: "admin"}},
},


@ -22,7 +22,7 @@ func (n *Nodes) List(q *QueryOptions) ([]*NodeListStub, *QueryMeta, error) {
if err != nil {
return nil, nil, err
}
- sort.Sort(NodeIndexSort(resp))
+ sort.Sort(resp)
return resp, qm, nil
}
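This is the same shape of cleanup as the next two files: sort.Sort(resp) only compiles if resp already satisfies sort.Interface, so presumably resp is declared as NodeIndexSort in this file and the NodeIndexSort(resp) wrapper was a redundant conversion.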


@ -75,7 +75,7 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err
// TODO (alexdadgar) Currently we made address a query parameter. Once
// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
- r.params.Set("address", string(address))
+ r.params.Set("address", address)
_, resp, err := requireOK(op.c.doRequest(r))
if err != nil {


@ -83,7 +83,7 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) {
// failing health checks.
type CheckRestart struct {
Limit int `mapstructure:"limit"`
- Grace *time.Duration `mapstructure:"grace_period"`
+ Grace *time.Duration `mapstructure:"grace"`
IgnoreWarnings bool `mapstructure:"ignore_warnings"`
}
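With the tag renamed, decoders expect the key grace instead of grace_period. A small sketch of the effect, assuming github.com/mitchellh/mapstructure (the library these tags target) and a standalone copy of the struct:

package main

import (
	"fmt"
	"time"

	"github.com/mitchellh/mapstructure"
)

// Standalone copy of the struct above, for illustration only.
type CheckRestart struct {
	Limit          int            `mapstructure:"limit"`
	Grace          *time.Duration `mapstructure:"grace"`
	IgnoreWarnings bool           `mapstructure:"ignore_warnings"`
}

func main() {
	in := map[string]interface{}{"limit": 3, "grace": 10 * time.Second}
	var cr CheckRestart
	if err := mapstructure.Decode(in, &cr); err != nil {
		panic(err)
	}
	fmt.Println(cr.Limit, *cr.Grace) // 3 10s
}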


@ -180,7 +180,7 @@ func CopyMapStringStruct(m map[string]struct{}) map[string]struct{} {
}
c := make(map[string]struct{}, l)
- for k, _ := range m {
+ for k := range m {
c[k] = struct{}{}
}
return c


@ -1,78 +0,0 @@
package structs
import "fmt"
// Bitmap is a simple uncompressed bitmap
type Bitmap []byte
// NewBitmap returns a bitmap with up to size indexes
func NewBitmap(size uint) (Bitmap, error) {
if size == 0 {
return nil, fmt.Errorf("bitmap must be positive size")
}
if size&7 != 0 {
return nil, fmt.Errorf("bitmap must be byte aligned")
}
b := make([]byte, size>>3)
return Bitmap(b), nil
}
// Copy returns a copy of the Bitmap
func (b Bitmap) Copy() (Bitmap, error) {
if b == nil {
return nil, fmt.Errorf("can't copy nil Bitmap")
}
raw := make([]byte, len(b))
copy(raw, b)
return Bitmap(raw), nil
}
// Size returns the size of the bitmap
func (b Bitmap) Size() uint {
return uint(len(b) << 3)
}
// Set is used to set the given index of the bitmap
func (b Bitmap) Set(idx uint) {
bucket := idx >> 3
mask := byte(1 << (idx & 7))
b[bucket] |= mask
}
// Unset is used to unset the given index of the bitmap
func (b Bitmap) Unset(idx uint) {
bucket := idx >> 3
// Mask is all ones except at the idx position
offset := 1 << (idx & 7)
mask := byte(offset ^ 0xff)
b[bucket] &= mask
}
// Check is used to check the given index of the bitmap
func (b Bitmap) Check(idx uint) bool {
bucket := idx >> 3
mask := byte(1 << (idx & 7))
return (b[bucket] & mask) != 0
}
// Clear is used to efficiently clear the bitmap
func (b Bitmap) Clear() {
for i := range b {
b[i] = 0
}
}
// IndexesInRange returns the indexes within the given range whose values are
// either set or unset, depending on the passed parameter
func (b Bitmap) IndexesInRange(set bool, from, to uint) []int {
var indexes []int
for i := from; i <= to && i < b.Size(); i++ {
c := b.Check(i)
if c && set || !c && !set {
indexes = append(indexes, int(i))
}
}
return indexes
}
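This file disappears from the vendored tree along with the rest of nomad/structs. For reference, a short sketch of how the type behaves, assuming the upstream structs package that still provides it:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	b, _ := structs.NewBitmap(16) // size must be positive and a multiple of 8
	b.Set(3)
	b.Set(9)
	fmt.Println(b.Check(3))                    // true
	fmt.Println(b.IndexesInRange(true, 0, 15)) // [3 9]
	b.Unset(3)
	fmt.Println(b.Check(3)) // false
}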

File diff suppressed because it is too large


@ -1,310 +0,0 @@
package structs
import (
crand "crypto/rand"
"encoding/binary"
"fmt"
"math"
"sort"
"strings"
"golang.org/x/crypto/blake2b"
multierror "github.com/hashicorp/go-multierror"
lru "github.com/hashicorp/golang-lru"
"github.com/hashicorp/nomad/acl"
)
// MergeMultierrorWarnings takes job warnings and canonicalization warnings and
// merges them into a single returnable string. Any of the errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
var warningMsg multierror.Error
for _, warn := range warnings {
if warn != nil {
multierror.Append(&warningMsg, warn)
}
}
if len(warningMsg.Errors) == 0 {
return ""
}
// Set the formatter
warningMsg.ErrorFormat = warningsFormatter
return warningMsg.Error()
}
// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
points := make([]string, len(es))
for i, err := range es {
points[i] = fmt.Sprintf("* %s", err)
}
return fmt.Sprintf(
"%d warning(s):\n\n%s",
len(es), strings.Join(points, "\n"))
}
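For reference, what the formatter produces, as a sketch against the upstream structs package with made-up warning text:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	msg := structs.MergeMultierrorWarnings(
		fmt.Errorf("group web: count is unusually high"),
		nil, // nil warnings are skipped
		fmt.Errorf("task web: no resources declared"),
	)
	fmt.Println(msg)
	// 2 warning(s):
	//
	// * group web: count is unusually high
	// * task web: no resources declared
}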
// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
// Convert remove into a set
removeSet := make(map[string]struct{})
for _, remove := range remove {
removeSet[remove.ID] = struct{}{}
}
n := len(alloc)
for i := 0; i < n; i++ {
if _, ok := removeSet[alloc[i].ID]; ok {
alloc[i], alloc[n-1] = alloc[n-1], nil
i--
n--
}
}
alloc = alloc[:n]
return alloc
}
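RemoveAllocs above and FilterTerminalAllocs below share the same in-place deletion idiom: swap the doomed element with the last live one, nil out the vacated tail slot so the GC can reclaim it, and shrink the window. The same pattern on a plain int slice, as a standalone sketch (removeInts is a hypothetical helper):

// removeInts deletes every value present in the remove set, in place,
// without preserving order.
func removeInts(xs []int, remove map[int]struct{}) []int {
	n := len(xs)
	for i := 0; i < n; i++ {
		if _, ok := remove[xs[i]]; ok {
			// Swap with the last live element and shrink the window.
			xs[i], xs[n-1] = xs[n-1], 0
			i-- // re-examine the element just swapped in
			n--
		}
	}
	return xs[:n]
}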
// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocations
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
terminalAllocsByName := make(map[string]*Allocation)
n := len(allocs)
for i := 0; i < n; i++ {
if allocs[i].TerminalStatus() {
// Add the allocation to the terminal allocs map if it's not already
// there, or if it has a higher create index than the one currently
// stored.
alloc, ok := terminalAllocsByName[allocs[i].Name]
if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
terminalAllocsByName[allocs[i].Name] = allocs[i]
}
// Remove the allocation
allocs[i], allocs[n-1] = allocs[n-1], nil
i--
n--
}
}
return allocs[:n], terminalAllocsByName
}
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
// Compute the utilization from zero
used := new(Resources)
// Add the reserved resources of the node
if node.Reserved != nil {
if err := used.Add(node.Reserved); err != nil {
return false, "", nil, err
}
}
// For each alloc, add the resources
for _, alloc := range allocs {
if alloc.Resources != nil {
if err := used.Add(alloc.Resources); err != nil {
return false, "", nil, err
}
} else if alloc.TaskResources != nil {
// Add the allocation's shared resources to the used resources
if err := used.Add(alloc.SharedResources); err != nil {
return false, "", nil, err
}
// Allocations within the plan have the combined resources stripped
// to save space, so sum up the individual task resources.
for _, taskResource := range alloc.TaskResources {
if err := used.Add(taskResource); err != nil {
return false, "", nil, err
}
}
} else {
return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
}
}
// Check that the node resources are a super set of those
// that are being allocated
if superset, dimension := node.Resources.Superset(used); !superset {
return false, dimension, used, nil
}
// Create the network index if missing
if netIdx == nil {
netIdx = NewNetworkIndex()
defer netIdx.Release()
if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
return false, "reserved port collision", used, nil
}
}
// Check if the network is overcommitted
if netIdx.Overcommitted() {
return false, "bandwidth exceeded", used, nil
}
// Allocations fit!
return true, "", used, nil
}
// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
// Determine the node availability
nodeCpu := float64(node.Resources.CPU)
if node.Reserved != nil {
nodeCpu -= float64(node.Reserved.CPU)
}
nodeMem := float64(node.Resources.MemoryMB)
if node.Reserved != nil {
nodeMem -= float64(node.Reserved.MemoryMB)
}
// Compute the free percentage
freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
// Total will be "maximized" the smaller the value is.
// At 100% utilization, the total is 2, while at 0% util it is 20.
total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
// Invert so that the "maximized" total represents a high-value
// score. Because the floor is 20, we simply use that as an anchor.
// This means at a perfect fit, we return 18 as the score.
score := 20.0 - total
// Bound the score, just in case
// If the score is over 18, that means we've overfit the node.
if score > 18.0 {
score = 18.0
} else if score < 0 {
score = 0
}
return score
}
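A worked example with assumed numbers: a node with 4000 MHz of CPU and 8192 MB of memory, nothing reserved, scoring a utilization of 2000 MHz and 4096 MB. Both free percentages are 0.5, so total = 10^0.5 + 10^0.5 ≈ 6.32 and the score is 20 − 6.32 ≈ 13.68. An idle node scores 20 − 20 = 0, a perfectly packed one 20 − 2 = 18, so higher scores mean tighter packing, which is what a bin-packing scheduler wants.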
// GenerateUUID is used to generate a random UUID
func GenerateUUID() string {
buf := make([]byte, 16)
if _, err := crand.Read(buf); err != nil {
panic(fmt.Errorf("failed to read random bytes: %v", err))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
buf[0:4],
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
}
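This helper moves rather than disappears: the MockJob hunk above swaps structs.GenerateUUID() for uuid.Generate(), and the vendor.json change at the bottom replaces the nomad/structs entry with github.com/hashicorp/nomad/helper/uuid to match. Either way, note the output is just 16 random bytes rendered in the 8-4-4-4-12 shape; it does not set the RFC 4122 version and variant bits of a true v4 UUID.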
func CopySliceConstraints(s []*Constraint) []*Constraint {
l := len(s)
if l == 0 {
return nil
}
c := make([]*Constraint, l)
for i, v := range s {
c[i] = v.Copy()
}
return c
}
// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
set := make(map[string]struct{})
for _, tgp := range policies {
for _, tp := range tgp {
for _, p := range tp.Policies {
set[p] = struct{}{}
}
}
}
flattened := make([]string, 0, len(set))
for p := range set {
flattened = append(flattened, p)
}
return flattened
}
// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
if job != nil {
for _, alloc := range allocs {
if alloc.Job == nil && !alloc.TerminalStatus() {
alloc.Job = job
}
}
}
}
// AllocName returns the name of the allocation given the input.
func AllocName(job, group string, idx uint) string {
return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}
// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
cacheKeyHash, err := blake2b.New256(nil)
if err != nil {
panic(err)
}
for _, policy := range policies {
cacheKeyHash.Write([]byte(policy.Name))
binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
}
cacheKey := string(cacheKeyHash.Sum(nil))
return cacheKey
}
// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
// Sort the policies to ensure consistent ordering
sort.Slice(policies, func(i, j int) bool {
return policies[i].Name < policies[j].Name
})
// Determine the cache key
cacheKey := ACLPolicyListHash(policies)
aclRaw, ok := cache.Get(cacheKey)
if ok {
return aclRaw.(*acl.ACL), nil
}
// Parse the policies
parsed := make([]*acl.Policy, 0, len(policies))
for _, policy := range policies {
p, err := acl.Parse(policy.Rules)
if err != nil {
return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
}
parsed = append(parsed, p)
}
// Create the ACL object
aclObj, err := acl.NewACL(false, parsed)
if err != nil {
return nil, fmt.Errorf("failed to construct ACL: %v", err)
}
// Update the cache
cache.Add(cacheKey, aclObj)
return aclObj, nil
}
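A usage sketch against the upstream structs and golang-lru packages; the policy name, rules, and index are made up. Note that CompileACLObject sorts the policies slice in place so the cache key is insensitive to input order.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	cache, err := lru.New2Q(128)
	if err != nil {
		panic(err)
	}
	policies := []*structs.ACLPolicy{
		{Name: "readonly", Rules: `namespace "default" { policy = "read" }`, ModifyIndex: 7},
	}
	a1, err := structs.CompileACLObject(cache, policies)
	if err != nil {
		panic(err)
	}
	a2, _ := structs.CompileACLObject(cache, policies)
	fmt.Println(a1 == a2) // true: the second call is served from the cache
}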


@ -1,326 +0,0 @@
package structs
import (
"fmt"
"math/rand"
"net"
"sync"
)
const (
// MinDynamicPort is the smallest dynamic port generated
MinDynamicPort = 20000
// MaxDynamicPort is the largest dynamic port generated
MaxDynamicPort = 32000
// maxRandPortAttempts is the maximum number of attempts
// to assign a random port
maxRandPortAttempts = 20
// maxValidPort is the max valid port number
maxValidPort = 65536
)
var (
// bitmapPool is used to pool the bitmaps used for port collision
// checking. They are fairly large (8K) so we can re-use them to
// avoid GC pressure. Care should be taken to call Clear() on any
// bitmap coming from the pool.
bitmapPool = new(sync.Pool)
)
// NetworkIndex is used to index the available network resources
// and the used network resources on a machine given allocations
type NetworkIndex struct {
AvailNetworks []*NetworkResource // List of available networks
AvailBandwidth map[string]int // Bandwidth by device
UsedPorts map[string]Bitmap // Ports by IP
UsedBandwidth map[string]int // Bandwidth by device
}
// NewNetworkIndex is used to construct a new network index
func NewNetworkIndex() *NetworkIndex {
return &NetworkIndex{
AvailBandwidth: make(map[string]int),
UsedPorts: make(map[string]Bitmap),
UsedBandwidth: make(map[string]int),
}
}
// Release is called when the network index is no longer needed
// to attempt to re-use some of the memory it has allocated
func (idx *NetworkIndex) Release() {
for _, b := range idx.UsedPorts {
bitmapPool.Put(b)
}
}
// Overcommitted checks if the network is overcommitted
func (idx *NetworkIndex) Overcommitted() bool {
for device, used := range idx.UsedBandwidth {
avail := idx.AvailBandwidth[device]
if used > avail {
return true
}
}
return false
}
// SetNode is used to set up the available network resources. Returns
// true if there is a collision
func (idx *NetworkIndex) SetNode(node *Node) (collide bool) {
// Add the available CIDR blocks
for _, n := range node.Resources.Networks {
if n.Device != "" {
idx.AvailNetworks = append(idx.AvailNetworks, n)
idx.AvailBandwidth[n.Device] = n.MBits
}
}
// Add the reserved resources
if r := node.Reserved; r != nil {
for _, n := range r.Networks {
if idx.AddReserved(n) {
collide = true
}
}
}
return
}
// AddAllocs is used to add the used network resources. Returns
// true if there is a collision
func (idx *NetworkIndex) AddAllocs(allocs []*Allocation) (collide bool) {
for _, alloc := range allocs {
for _, task := range alloc.TaskResources {
if len(task.Networks) == 0 {
continue
}
n := task.Networks[0]
if idx.AddReserved(n) {
collide = true
}
}
}
return
}
// AddReserved is used to add a reserved network usage and returns true
// if there is a port collision
func (idx *NetworkIndex) AddReserved(n *NetworkResource) (collide bool) {
// Add the port usage
used := idx.UsedPorts[n.IP]
if used == nil {
// Try to get a bitmap from the pool, else create
raw := bitmapPool.Get()
if raw != nil {
used = raw.(Bitmap)
used.Clear()
} else {
used, _ = NewBitmap(maxValidPort)
}
idx.UsedPorts[n.IP] = used
}
for _, ports := range [][]Port{n.ReservedPorts, n.DynamicPorts} {
for _, port := range ports {
// Guard against invalid port
if port.Value < 0 || port.Value >= maxValidPort {
return true
}
if used.Check(uint(port.Value)) {
collide = true
} else {
used.Set(uint(port.Value))
}
}
}
// Add the bandwidth
idx.UsedBandwidth[n.Device] += n.MBits
return
}
// yieldIP is used to iteratively invoke the callback with
// an available IP
func (idx *NetworkIndex) yieldIP(cb func(net *NetworkResource, ip net.IP) bool) {
inc := func(ip net.IP) {
for j := len(ip) - 1; j >= 0; j-- {
ip[j]++
if ip[j] > 0 {
break
}
}
}
for _, n := range idx.AvailNetworks {
ip, ipnet, err := net.ParseCIDR(n.CIDR)
if err != nil {
continue
}
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
if cb(n, ip) {
return
}
}
}
}
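The inc closure treats the address as a big-endian counter with carry, which is what lets the loop walk every address in the CIDR block. Pulled out as a standalone sketch:

package main

import (
	"fmt"
	"net"
)

func inc(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 { // stop unless this byte wrapped to zero
			break
		}
	}
}

func main() {
	ip := net.ParseIP("10.0.0.255").To4()
	inc(ip)
	fmt.Println(ip) // 10.0.1.0
}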
// AssignNetwork is used to assign network resources given an ask.
// If the ask cannot be satisfied, returns nil
func (idx *NetworkIndex) AssignNetwork(ask *NetworkResource) (out *NetworkResource, err error) {
err = fmt.Errorf("no networks available")
idx.yieldIP(func(n *NetworkResource, ip net.IP) (stop bool) {
// Convert the IP to a string
ipStr := ip.String()
// Check if we would exceed the bandwidth cap
availBandwidth := idx.AvailBandwidth[n.Device]
usedBandwidth := idx.UsedBandwidth[n.Device]
if usedBandwidth+ask.MBits > availBandwidth {
err = fmt.Errorf("bandwidth exceeded")
return
}
used := idx.UsedPorts[ipStr]
// Check if any of the reserved ports are in use
for _, port := range ask.ReservedPorts {
// Guard against invalid port
if port.Value < 0 || port.Value >= maxValidPort {
err = fmt.Errorf("invalid port %d (out of range)", port.Value)
return
}
// Check if in use
if used != nil && used.Check(uint(port.Value)) {
err = fmt.Errorf("reserved port collision")
return
}
}
// Create the offer
offer := &NetworkResource{
Device: n.Device,
IP: ipStr,
MBits: ask.MBits,
ReservedPorts: ask.ReservedPorts,
DynamicPorts: ask.DynamicPorts,
}
// Try to stochastically pick the dynamic ports, as it is faster and
// uses less memory.
var dynPorts []int
var dynErr error
dynPorts, dynErr = getDynamicPortsStochastic(used, ask)
if dynErr == nil {
goto BUILD_OFFER
}
// Fall back to the precise method if the random sampling failed.
dynPorts, dynErr = getDynamicPortsPrecise(used, ask)
if dynErr != nil {
err = dynErr
return
}
BUILD_OFFER:
for i, port := range dynPorts {
offer.DynamicPorts[i].Value = port
}
// Stop, we have an offer!
out = offer
err = nil
return true
})
return
}
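The intended call pattern, as a sketch against the upstream structs package (node and allocs are placeholders supplied by the caller):

package example

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func assignExample(node *structs.Node, allocs []*structs.Allocation) (*structs.NetworkResource, error) {
	idx := structs.NewNetworkIndex()
	defer idx.Release() // hand the pooled port bitmaps back
	if idx.SetNode(node) || idx.AddAllocs(allocs) {
		return nil, fmt.Errorf("reserved port collision")
	}
	ask := &structs.NetworkResource{
		MBits:        50,
		DynamicPorts: []structs.Port{{Label: "http"}},
	}
	return idx.AssignNetwork(ask)
}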
// getDynamicPortsPrecise takes the node's used port bitmap, which may be nil
// if no ports have been allocated yet, and the network ask, and returns a set
// of unused ports to fulfill the ask's DynamicPorts, or an error if it fails.
// An error means the ask cannot be satisfied, as the method does a precise search.
func getDynamicPortsPrecise(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) {
// Create a copy of the used ports and apply the new reserves
var usedSet Bitmap
var err error
if nodeUsed != nil {
usedSet, err = nodeUsed.Copy()
if err != nil {
return nil, err
}
} else {
usedSet, err = NewBitmap(maxValidPort)
if err != nil {
return nil, err
}
}
for _, port := range ask.ReservedPorts {
usedSet.Set(uint(port.Value))
}
// Get the indexes of the unset
availablePorts := usedSet.IndexesInRange(false, MinDynamicPort, MaxDynamicPort)
// Randomly select the ports we need from the available set
numDyn := len(ask.DynamicPorts)
if len(availablePorts) < numDyn {
return nil, fmt.Errorf("dynamic port selection failed")
}
numAvailable := len(availablePorts)
for i := 0; i < numDyn; i++ {
j := rand.Intn(numAvailable)
availablePorts[i], availablePorts[j] = availablePorts[j], availablePorts[i]
}
return availablePorts[:numDyn], nil
}
// getDynamicPortsStochastic takes the node's used port bitmap, which may be nil
// if no ports have been allocated yet, and the network ask, and returns a set
// of unused ports to fulfill the ask's DynamicPorts, or an error if it fails.
// An error does not mean the ask cannot be satisfied, as the method makes only
// a fixed number of random probes and aborts the search if they all fail.
func getDynamicPortsStochastic(nodeUsed Bitmap, ask *NetworkResource) ([]int, error) {
var reserved, dynamic []int
for _, port := range ask.ReservedPorts {
reserved = append(reserved, port.Value)
}
for i := 0; i < len(ask.DynamicPorts); i++ {
attempts := 0
PICK:
attempts++
if attempts > maxRandPortAttempts {
return nil, fmt.Errorf("stochastic dynamic port selection failed")
}
randPort := MinDynamicPort + rand.Intn(MaxDynamicPort-MinDynamicPort)
if nodeUsed != nil && nodeUsed.Check(uint(randPort)) {
goto PICK
}
for _, ports := range [][]int{reserved, dynamic} {
if isPortReserved(ports, randPort) {
goto PICK
}
}
dynamic = append(dynamic, randPort)
}
return dynamic, nil
}
// isPortReserved scans an integer slice for a value
func isPortReserved(haystack []int, needle int) bool {
for _, item := range haystack {
if item == needle {
return true
}
}
return false
}


@ -1,94 +0,0 @@
package structs
import (
"fmt"
"strings"
"github.com/mitchellh/hashstructure"
)
const (
// NodeUniqueNamespace is a prefix that can be prepended to node meta or
// attribute keys to mark them for exclusion from the computed node class.
NodeUniqueNamespace = "unique."
)
// UniqueNamespace takes a key and returns the key marked under the unique
// namespace.
func UniqueNamespace(key string) string {
return fmt.Sprintf("%s%s", NodeUniqueNamespace, key)
}
// IsUniqueNamespace returns whether the key is under the unique namespace.
func IsUniqueNamespace(key string) bool {
return strings.HasPrefix(key, NodeUniqueNamespace)
}
// ComputeClass computes a derived class for the node based on its attributes.
// ComputedClass is a unique id that identifies nodes with a common set of
// attributes and capabilities. Thus, when calculating a node's computed class
// we avoid including any uniquely identifying fields.
func (n *Node) ComputeClass() error {
hash, err := hashstructure.Hash(n, nil)
if err != nil {
return err
}
n.ComputedClass = fmt.Sprintf("v1:%d", hash)
return nil
}
// HashInclude is used to exclude uniquely identifying node fields from the
// computed node class by whitelisting only the fields that are hashed.
func (n Node) HashInclude(field string, v interface{}) (bool, error) {
switch field {
case "Datacenter", "Attributes", "Meta", "NodeClass":
return true, nil
default:
return false, nil
}
}
// HashIncludeMap is used to exclude uniquely identifying node map keys from
// the computed node class.
func (n Node) HashIncludeMap(field string, k, v interface{}) (bool, error) {
key, ok := k.(string)
if !ok {
return false, fmt.Errorf("map key %v not a string", k)
}
switch field {
case "Meta", "Attributes":
return !IsUniqueNamespace(key), nil
default:
return false, fmt.Errorf("unexpected map field: %v", field)
}
}
// EscapedConstraints takes a set of constraints and returns the subset that
// escapes computed node classes.
func EscapedConstraints(constraints []*Constraint) []*Constraint {
var escaped []*Constraint
for _, c := range constraints {
if constraintTargetEscapes(c.LTarget) || constraintTargetEscapes(c.RTarget) {
escaped = append(escaped, c)
}
}
return escaped
}
// constraintTargetEscapes returns whether the target of a constraint escapes
// computed node class optimization.
func constraintTargetEscapes(target string) bool {
switch {
case strings.HasPrefix(target, "${node.unique."):
return true
case strings.HasPrefix(target, "${attr.unique."):
return true
case strings.HasPrefix(target, "${meta.unique."):
return true
default:
return false
}
}
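A sketch of the exclusion in action, assuming the upstream structs package: two nodes that differ only in a unique.-prefixed meta key hash to the same computed class.

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/nomad/structs"
)

func main() {
	n1 := &structs.Node{Datacenter: "dc1", Meta: map[string]string{"unique.hostname": "a", "rack": "r1"}}
	n2 := &structs.Node{Datacenter: "dc1", Meta: map[string]string{"unique.hostname": "b", "rack": "r1"}}
	if err := n1.ComputeClass(); err != nil {
		panic(err)
	}
	if err := n2.ComputeClass(); err != nil {
		panic(err)
	}
	fmt.Println(n1.ComputedClass == n2.ComputedClass) // true
}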


@ -1,49 +0,0 @@
package structs
import (
"github.com/hashicorp/raft"
)
// RaftServer has information about a server in the Raft configuration.
type RaftServer struct {
// ID is the unique ID for the server. These are currently the same
// as the address, but they will be changed to a real GUID in a future
// release of Nomad.
ID raft.ServerID
// Node is the node name of the server, as known by Nomad, or this
// will be set to "(unknown)" otherwise.
Node string
// Address is the IP:port of the server, used for Raft communications.
Address raft.ServerAddress
// Leader is true if this server is the current cluster leader.
Leader bool
// Voter is true if this server has a vote in the cluster. This might
// be false if the server is staging and still coming online, or if
// it's a non-voting server, which will be added in a future release of
// Nomad.
Voter bool
}
// RaftConfigurationResponse is returned when querying for the current Raft
// configuration.
type RaftConfigurationResponse struct {
// Servers has the list of servers in the Raft configuration.
Servers []*RaftServer
// Index has the Raft index of this configuration.
Index uint64
}
// RaftPeerByAddressRequest is used by the Operator endpoint to apply a Raft
// operation on a specific Raft peer by address in the form of "IP:port".
type RaftPeerByAddressRequest struct {
// Address is the peer to remove, in the form "IP:port".
Address raft.ServerAddress
// WriteRequest holds the Region for this request.
WriteRequest
}

File diff suppressed because it is too large


@ -1,3 +0,0 @@
package structs
//go:generate codecgen -d 100 -o structs.generated.go structs.go

vendor/vendor.json (vendored)

@ -1147,28 +1147,28 @@
"revisionTime": "2017-09-14T15:46:24Z"
},
{
"checksumSHA1": "4tY6k1MqB50R66TJJH/rsG69Yd4=",
"checksumSHA1": "euodRTxiXS6udU7N9xRCQL6YDCg=",
"path": "github.com/hashicorp/nomad/api",
"revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e",
"revisionTime": "2017-09-20T19:48:06Z"
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
"revisionTime": "2017-09-29T21:44:31Z"
},
{
"checksumSHA1": "Is7OvHxCEEkKpdQnW8olCxL0444=",
"path": "github.com/hashicorp/nomad/api/contexts",
"revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e",
"revisionTime": "2017-09-20T19:48:06Z"
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
"revisionTime": "2017-09-29T21:44:31Z"
},
{
"checksumSHA1": "GpikwcF9oi5Rrs/58xDSfiMy/I8=",
"checksumSHA1": "DE+4s/X+r987Ia93s9633mGekzg=",
"path": "github.com/hashicorp/nomad/helper",
"revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e",
"revisionTime": "2017-09-20T19:48:06Z"
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
"revisionTime": "2017-09-29T21:44:31Z"
},
{
"checksumSHA1": "hrzGvgMsH9p6MKOu3zYS8fooL3g=",
"path": "github.com/hashicorp/nomad/nomad/structs",
"revision": "a1e08240a1cff7d1bd10c89d722110a053f8dc6e",
"revisionTime": "2017-09-20T19:48:06Z"
"checksumSHA1": "mSCo/iZUEOSpeX5NsGZZzFMJqto=",
"path": "github.com/hashicorp/nomad/helper/uuid",
"revision": "ebc79fba332501f71f272ade9382d67a47c5b4e1",
"revisionTime": "2017-09-29T21:44:31Z"
},
{
"checksumSHA1": "/oss17GO4hXGM7QnUdI3VzcAHzA=",