commit 52f9cd7637
parent bac5cb1e8b

fixing tests
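Note (not part of the diff): the recurring pattern in the hunks below is a COMPAT(0.11) fallback — use the new AllocatedResources fields when they are populated and fall back to the legacy Resources fields otherwise. A minimal illustrative sketch of that pattern, using the struct names that appear in this diff (the helper name allocDiskMB is hypothetical and only mirrors the client/gc.go hunk):

// Illustrative sketch of the COMPAT(0.11) fallback used throughout this commit.
// COMPAT(0.11): remove once AllocatedResources is always populated.
func allocDiskMB(alloc *structs.Allocation) uint64 {
	if alloc.AllocatedResources != nil {
		// New style: the shared (task-group level) resources carry the disk ask.
		return alloc.AllocatedResources.Shared.DiskMB
	}
	// Legacy style: fall back to the flat Resources struct.
	return uint64(alloc.Resources.DiskMB)
}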
@@ -1012,7 +1012,7 @@ func TestAllocRunner_TaskFailed_KillTG(t *testing.T) {
"start_error": "fail task please",
}
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
ar.alloc.TaskResources[task2.Name] = task2.Resources
ar.alloc.AllocatedResources.Tasks[task2.Name] = ar.alloc.AllocatedResources.Tasks[task.Name].Copy()
go ar.Run()
defer ar.Destroy()

@@ -1081,7 +1081,7 @@ func TestAllocRunner_TaskLeader_KillTG(t *testing.T) {
"run_for": "1s",
}
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
ar.alloc.TaskResources[task2.Name] = task2.Resources
ar.alloc.AllocatedResources.Tasks[task2.Name] = ar.alloc.AllocatedResources.Tasks[task.Name].Copy()
go ar.Run()
defer ar.Destroy()

@@ -1165,7 +1165,9 @@ func TestAllocRunner_TaskLeader_StopTG(t *testing.T) {
"run_for": "10s",
}
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2, task3)
ar.alloc.TaskResources[task2.Name] = task2.Resources
ar.alloc.AllocatedResources.Tasks[task.Name] = ar.alloc.AllocatedResources.Tasks["web"].Copy()
ar.alloc.AllocatedResources.Tasks[task2.Name] = ar.alloc.AllocatedResources.Tasks[task.Name].Copy()
ar.alloc.AllocatedResources.Tasks[task3.Name] = ar.alloc.AllocatedResources.Tasks[task.Name].Copy()
defer ar.Destroy()
go ar.Run()

@@ -1253,7 +1255,8 @@ func TestAllocRunner_TaskLeader_StopRestoredTG(t *testing.T) {
}
ar.alloc.Job.TaskGroups[0].Tasks = append(ar.alloc.Job.TaskGroups[0].Tasks, task2)
ar.alloc.TaskResources[task2.Name] = task2.Resources
ar.alloc.AllocatedResources.Tasks[task.Name] = ar.alloc.AllocatedResources.Tasks["web"].Copy()
ar.alloc.AllocatedResources.Tasks[task2.Name] = ar.alloc.AllocatedResources.Tasks[task.Name].Copy()
// Mimic Nomad exiting before the leader stopping is able to stop other tasks.
ar.tasks = map[string]*taskrunner.TaskRunner{
client/client.go | 110

@@ -2281,26 +2281,24 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) {
c.configLock.RLock()
node := c.configCopy.Node
c.configLock.RUnlock()
total := node.Resources
res := node.Reserved
allocated := c.getAllocatedResources(node)
total := node.NodeResources
res := node.ReservedResources
allocated := c.getAllocatedResources2(node)

// Emit allocated
if !c.config.DisableTaggedMetrics {
metrics.SetGaugeWithLabels([]string{"client", "allocated", "memory"}, float32(allocated.MemoryMB), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "disk"}, float32(allocated.DiskMB), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "cpu"}, float32(allocated.CPU), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "iops"}, float32(allocated.IOPS), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "memory"}, float32(allocated.Flattened.Memory.MemoryMB), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "disk"}, float32(allocated.Shared.DiskMB), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "allocated", "cpu"}, float32(allocated.Flattened.Cpu.CpuShares), c.baseLabels)
}

if c.config.BackwardsCompatibleMetrics {
metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.MemoryMB))
metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.DiskMB))
metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.CPU))
metrics.SetGauge([]string{"client", "allocated", "iops", nodeID}, float32(allocated.IOPS))
metrics.SetGauge([]string{"client", "allocated", "memory", nodeID}, float32(allocated.Flattened.Memory.MemoryMB))
metrics.SetGauge([]string{"client", "allocated", "disk", nodeID}, float32(allocated.Shared.DiskMB))
metrics.SetGauge([]string{"client", "allocated", "cpu", nodeID}, float32(allocated.Flattened.Cpu.CpuShares))
}

for _, n := range allocated.Networks {
for _, n := range allocated.Flattened.Networks {
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{
Name: "device",

@@ -2315,34 +2313,32 @@ func (c *Client) setGaugeForAllocationStats(nodeID string) {
}

// Emit unallocated
unallocatedMem := total.MemoryMB - res.MemoryMB - allocated.MemoryMB
unallocatedDisk := total.DiskMB - res.DiskMB - allocated.DiskMB
unallocatedCpu := total.CPU - res.CPU - allocated.CPU
unallocatedIops := total.IOPS - res.IOPS - allocated.IOPS
unallocatedMem := total.Memory.MemoryMB - res.Memory.MemoryMB - allocated.Flattened.Memory.MemoryMB
unallocatedDisk := total.Disk.DiskMB - res.Disk.DiskMB - allocated.Shared.DiskMB
unallocatedCpu := total.Cpu.TotalShares - res.Cpu.TotalShares - allocated.Flattened.Cpu.CpuShares

if !c.config.DisableTaggedMetrics {
metrics.SetGaugeWithLabels([]string{"client", "unallocated", "memory"}, float32(unallocatedMem), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "unallocated", "disk"}, float32(unallocatedDisk), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "unallocated", "cpu"}, float32(unallocatedCpu), c.baseLabels)
metrics.SetGaugeWithLabels([]string{"client", "unallocated", "iops"}, float32(unallocatedIops), c.baseLabels)
}

if c.config.BackwardsCompatibleMetrics {
metrics.SetGauge([]string{"client", "unallocated", "memory", nodeID}, float32(unallocatedMem))
metrics.SetGauge([]string{"client", "unallocated", "disk", nodeID}, float32(unallocatedDisk))
metrics.SetGauge([]string{"client", "unallocated", "cpu", nodeID}, float32(unallocatedCpu))
metrics.SetGauge([]string{"client", "unallocated", "iops", nodeID}, float32(unallocatedIops))
}

for _, n := range allocated.Networks {
totalIdx := total.NetIndex(n)
totalComparable := total.Comparable()
for _, n := range totalComparable.Flattened.Networks {
// Determined the used resources
var usedMbits int
totalIdx := allocated.Flattened.Networks.NetIndex(n)
if totalIdx != -1 {
continue
usedMbits = allocated.Flattened.Networks[totalIdx].MBits
}

totalMbits := total.Networks[totalIdx].MBits
unallocatedMbits := totalMbits - n.MBits

unallocatedMbits := n.MBits - usedMbits
if !c.config.DisableTaggedMetrics {
labels := append(c.baseLabels, metrics.Label{
Name: "device",

@@ -2421,7 +2417,9 @@ func (c *Client) emitClientMetrics() {
}
}

// TODO
func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Resources {
return &structs.Resources{}
// Unfortunately the allocs only have IP so we need to match them to the
// device
cidrToDevice := make(map[*net.IPNet]string, len(selfNode.Resources.Networks))

@@ -2465,6 +2463,70 @@ func (c *Client) getAllocatedResources(selfNode *structs.Node) *structs.Resource
return &allocated
}

func (c *Client) getAllocatedResources2(selfNode *structs.Node) *structs.ComparableAllocatedResources {
// Unfortunately the allocs only have IP so we need to match them to the
// device
cidrToDevice := make(map[*net.IPNet]string, len(selfNode.Resources.Networks))
for _, n := range selfNode.NodeResources.Networks {
_, ipnet, err := net.ParseCIDR(n.CIDR)
if err != nil {
continue
}
cidrToDevice[ipnet] = n.Device
}

// Sum the allocated resources
allocs := c.allAllocs()
var allocated structs.ComparableAllocatedResources
allocatedDeviceMbits := make(map[string]int)
for _, alloc := range allocs {
if alloc.TerminalStatus() {
continue
}

// Add the resources
// COMPAT(0.11): Just use the allocated resources
allocated.Add(alloc.ComparableResources())

// Add the used network
if alloc.AllocatedResources != nil {
for _, tr := range alloc.AllocatedResources.Tasks {
for _, allocatedNetwork := range tr.Networks {
for cidr, dev := range cidrToDevice {
ip := net.ParseIP(allocatedNetwork.IP)
if cidr.Contains(ip) {
allocatedDeviceMbits[dev] += allocatedNetwork.MBits
break
}
}
}
}
} else if alloc.Resources != nil {
for _, allocatedNetwork := range alloc.Resources.Networks {
for cidr, dev := range cidrToDevice {
ip := net.ParseIP(allocatedNetwork.IP)
if cidr.Contains(ip) {
allocatedDeviceMbits[dev] += allocatedNetwork.MBits
break
}
}
}
}
}

// Clear the networks
allocated.Flattened.Networks = nil
for dev, speed := range allocatedDeviceMbits {
net := &structs.NetworkResource{
Device: dev,
MBits: speed,
}
allocated.Flattened.Networks = append(allocated.Flattened.Networks, net)
}

return &allocated
}

// allAllocs returns all the allocations managed by the client
func (c *Client) allAllocs() map[string]*structs.Allocation {
ars := c.getAllocRunners()
@@ -177,7 +177,7 @@ func setupTaskEnv(t *testing.T, driver string) (*allocdir.TaskDir, map[string]st
alloc := mock.Alloc()
alloc.Job.TaskGroups[0].Tasks[0] = task
alloc.Name = "Bar"
alloc.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 2000
alloc.AllocatedResources.Tasks["web"].Networks[0].DynamicPorts[0].Value = 2000
conf := testConfig(t)
allocDir := allocdir.NewAllocDir(testlog.Logger(t), filepath.Join(conf.AllocDir, alloc.ID))
taskDir := allocDir.NewTaskDir(task.Name)
@@ -207,8 +207,8 @@ type Builder struct {
// secretsDir from task's perspective; eg /secrets
secretsDir string

cpuLimit int
memLimit int
cpuLimit uint64
memLimit uint64
taskName string
allocIndex int
datacenter string

@@ -272,10 +272,10 @@ func (b *Builder) Build() *TaskEnv {
// Add the resource limits
if b.memLimit != 0 {
envMap[MemLimit] = strconv.Itoa(b.memLimit)
envMap[MemLimit] = strconv.FormatUint(b.memLimit, 10)
}
if b.cpuLimit != 0 {
envMap[CpuLimit] = strconv.Itoa(b.cpuLimit)
envMap[CpuLimit] = strconv.FormatUint(b.cpuLimit, 10)
}

// Add the task metadata

@@ -370,13 +370,15 @@ func (b *Builder) setTask(task *structs.Task) *Builder {
for k, v := range task.Env {
b.envvars[k] = v
}

// COMPAT(0.11): Remove in 0.11
if task.Resources == nil {
b.memLimit = 0
b.cpuLimit = 0
b.networks = []*structs.NetworkResource{}
} else {
b.memLimit = task.Resources.MemoryMB
b.cpuLimit = task.Resources.CPU
b.memLimit = uint64(task.Resources.MemoryMB)
b.cpuLimit = uint64(task.Resources.CPU)
// Copy networks to prevent sharing
b.networks = make([]*structs.NetworkResource, len(task.Resources.Networks))
for i, n := range task.Resources.Networks {

@@ -419,18 +421,50 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = v
}

// Add ports from other tasks
b.otherPorts = make(map[string]string, len(alloc.TaskResources)*2)
for taskName, resources := range alloc.TaskResources {
if taskName == b.taskName {
continue
}
for _, nw := range resources.Networks {
for _, p := range nw.ReservedPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
// COMPAT(0.11): Remove in 0.11
b.otherPorts = make(map[string]string, len(alloc.Job.LookupTaskGroup(alloc.TaskGroup).Tasks)*2)
if alloc.AllocatedResources != nil {
// Populate task resources
if tr, ok := alloc.AllocatedResources.Tasks[b.taskName]; ok {
b.cpuLimit = tr.Cpu.CpuShares
b.memLimit = tr.Memory.MemoryMB

// Copy networks to prevent sharing
b.networks = make([]*structs.NetworkResource, len(tr.Networks))
for i, n := range tr.Networks {
b.networks[i] = n.Copy()
}
for _, p := range nw.DynamicPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
}

// Add ports from other tasks
for taskName, resources := range alloc.AllocatedResources.Tasks {
// Add ports from other tasks
if taskName == b.taskName {
continue
}

for _, nw := range resources.Networks {
for _, p := range nw.ReservedPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
}
for _, p := range nw.DynamicPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
}
}
}
} else if alloc.TaskResources != nil {
for taskName, resources := range alloc.TaskResources {
// Add ports from other tasks
if taskName == b.taskName {
continue
}
for _, nw := range resources.Networks {
for _, p := range nw.ReservedPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
}
for _, p := range nw.DynamicPorts {
addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
}
}
}
}
@@ -137,11 +137,117 @@ func TestEnvironment_AsList(t *testing.T) {
"metaKey": "metaVal",
}
a := mock.Alloc()
a.Resources.Networks[0].ReservedPorts = append(a.Resources.Networks[0].ReservedPorts,
structs.Port{Label: "ssh", Value: 22},
structs.Port{Label: "other", Value: 1234},
a.AllocatedResources.Tasks["web"].Networks[0] = &structs.NetworkResource{
Device: "eth0",
IP: "127.0.0.1",
ReservedPorts: []structs.Port{{Label: "https", Value: 8080}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 80}},
}
a.AllocatedResources.Tasks["ssh"] = &structs.AllocatedTaskResources{
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
MBits: 50,
ReservedPorts: []structs.Port{
{Label: "ssh", Value: 22},
{Label: "other", Value: 1234},
},
},
},
}
task := a.Job.TaskGroups[0].Tasks[0]
task.Env = map[string]string{
"taskEnvKey": "taskEnvVal",
}
env := NewBuilder(n, a, task, "global").SetDriverNetwork(
&cstructs.DriverNetwork{PortMap: map[string]int{"https": 443}},
)
a.TaskResources["web"].Networks[0].DynamicPorts[0].Value = 2000

act := env.Build().List()
exp := []string{
"taskEnvKey=taskEnvVal",
"NOMAD_ADDR_http=127.0.0.1:80",
"NOMAD_PORT_http=80",
"NOMAD_IP_http=127.0.0.1",
"NOMAD_ADDR_https=127.0.0.1:8080",
"NOMAD_PORT_https=443",
"NOMAD_IP_https=127.0.0.1",
"NOMAD_HOST_PORT_http=80",
"NOMAD_HOST_PORT_https=8080",
"NOMAD_TASK_NAME=web",
"NOMAD_GROUP_NAME=web",
"NOMAD_ADDR_ssh_other=192.168.0.100:1234",
"NOMAD_ADDR_ssh_ssh=192.168.0.100:22",
"NOMAD_IP_ssh_other=192.168.0.100",
"NOMAD_IP_ssh_ssh=192.168.0.100",
"NOMAD_PORT_ssh_other=1234",
"NOMAD_PORT_ssh_ssh=22",
"NOMAD_CPU_LIMIT=500",
"NOMAD_DC=dc1",
"NOMAD_REGION=global",
"NOMAD_MEMORY_LIMIT=256",
"NOMAD_META_ELB_CHECK_INTERVAL=30s",
"NOMAD_META_ELB_CHECK_MIN=3",
"NOMAD_META_ELB_CHECK_TYPE=http",
"NOMAD_META_FOO=bar",
"NOMAD_META_OWNER=armon",
"NOMAD_META_elb_check_interval=30s",
"NOMAD_META_elb_check_min=3",
"NOMAD_META_elb_check_type=http",
"NOMAD_META_foo=bar",
"NOMAD_META_owner=armon",
"NOMAD_JOB_NAME=my-job",
fmt.Sprintf("NOMAD_ALLOC_ID=%s", a.ID),
"NOMAD_ALLOC_INDEX=0",
}
sort.Strings(act)
sort.Strings(exp)
require.Equal(t, exp, act)
}

// COMPAT(0.11): Remove in 0.11
func TestEnvironment_AsList_Old(t *testing.T) {
n := mock.Node()
n.Meta = map[string]string{
"metaKey": "metaVal",
}
a := mock.Alloc()
a.AllocatedResources = nil
a.Resources = &structs.Resources{
CPU: 500,
MemoryMB: 256,
DiskMB: 150,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{
{Label: "admin", Value: 5000},
{Label: "ssh", Value: 22},
{Label: "other", Value: 1234},
},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
},
}
a.TaskResources = map[string]*structs.Resources{
"web": {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 2000}},
},
},
},
}
a.TaskResources["ssh"] = &structs.Resources{
Networks: []*structs.NetworkResource{
{

@@ -209,15 +315,7 @@ func TestEnvironment_AsList(t *testing.T) {
}
sort.Strings(act)
sort.Strings(exp)
if len(act) != len(exp) {
t.Fatalf("expected %d vars != %d actual, actual:\n%s\n\nexpected:\n%s\n",
len(act), len(exp), strings.Join(act, "\n"), strings.Join(exp, "\n"))
}
for i := range act {
if act[i] != exp[i] {
t.Errorf("%d actual %q != %q expected", i, act[i], exp[i])
}
}
require.Equal(t, exp, act)
}

func TestEnvironment_VaultToken(t *testing.T) {
@@ -165,7 +165,7 @@ func (f *EnvAWSFingerprint) Fingerprint(request *cstructs.FingerprintRequest, re
}

newNetwork.MBits = throughput
response.Resources = &structs.Resources{
response.NodeResources = &structs.NodeResources{
Networks: []*structs.NetworkResource{newNetwork},
}
@@ -182,12 +182,12 @@ func TestNetworkFingerprint_AWS(t *testing.T) {
assertNodeAttributeContains(t, response.Attributes, "unique.network.ip-address")

if response.Resources == nil || len(response.Resources.Networks) == 0 {
if response.NodeResources == nil || len(response.NodeResources.Networks) == 0 {
t.Fatal("Expected to find Network Resources")
}

// Test at least the first Network Resource
net := response.Resources.Networks[0]
net := response.NodeResources.Networks[0]
if net.IP == "" {
t.Fatal("Expected Network Resource to have an IP")
}

@@ -236,12 +236,12 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) {
assertNodeAttributeContains(t, response.Attributes, "unique.network.ip-address")

if response.Resources == nil || len(response.Resources.Networks) == 0 {
if response.NodeResources == nil || len(response.NodeResources.Networks) == 0 {
t.Fatal("Expected to find Network Resources")
}

// Test at least the first Network Resource
net := response.Resources.Networks[0]
net := response.NodeResources.Networks[0]
if net.IP == "" {
t.Fatal("Expected Network Resource to have an IP")
}

@@ -275,12 +275,12 @@ func TestNetworkFingerprint_AWS_network(t *testing.T) {
assertNodeAttributeContains(t, response.Attributes, "unique.network.ip-address")

if response.Resources == nil || len(response.Resources.Networks) == 0 {
if response.NodeResources == nil || len(response.NodeResources.Networks) == 0 {
t.Fatal("Expected to find Network Resources")
}

// Test at least the first Network Resource
net := response.Resources.Networks[0]
net := response.NodeResources.Networks[0]
if net.IP == "" {
t.Fatal("Expected Network Resource to have an IP")
}
@@ -53,7 +53,7 @@ func (f *StorageFingerprint) Fingerprint(req *cstructs.FingerprintRequest, resp
}
resp.NodeResources = &structs.NodeResources{
Disk: structs.NodeDiskResources{
DiskMB: uint64(free / bytesPerMegabyte),
DiskMB: free / bytesPerMegabyte,
},
}
resp.Detected = true
client/gc.go | 30

@@ -260,10 +260,13 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
a.destroyAllocRunner(gcAlloc.allocRunner, fmt.Sprintf("new allocations and over max (%d)", a.config.MaxAllocs))
}

totalResource := &structs.Resources{}
totalResource := &structs.AllocatedSharedResources{}
for _, alloc := range allocations {
if err := totalResource.Add(alloc.Resources); err != nil {
return err
// COMPAT(0.11): Remove in 0.11
if alloc.AllocatedResources != nil {
totalResource.Add(&alloc.AllocatedResources.Shared)
} else {
totalResource.DiskMB += uint64(alloc.Resources.DiskMB)
}
}

@@ -276,12 +279,12 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
} else {
availableForAllocations = hostStats.AllocDirStats.Available - uint64(a.config.ReservedDiskMB*MB)
}
if uint64(totalResource.DiskMB*MB) < availableForAllocations {
if totalResource.DiskMB*MB < availableForAllocations {
return nil
}
}

var diskCleared int
var diskCleared uint64
for {
select {
case <-a.shutdownCh:

@@ -299,7 +302,7 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
}

if allocDirStats != nil {
if allocDirStats.Available >= uint64(totalResource.DiskMB*MB) {
if allocDirStats.Available >= totalResource.DiskMB*MB {
break
}
} else {

@@ -318,11 +321,18 @@ func (a *AllocGarbageCollector) MakeRoomFor(allocations []*structs.Allocation) e
ar := gcAlloc.allocRunner
alloc := ar.Alloc()

// Destroy the alloc runner and wait until it exits
a.destroyAllocRunner(ar, fmt.Sprintf("freeing %d MB for new allocations", alloc.Resources.DiskMB))
// COMPAT(0.11): Remove in 0.11
var allocDiskMB uint64
if alloc.AllocatedResources != nil {
allocDiskMB = alloc.AllocatedResources.Shared.DiskMB
} else {
allocDiskMB = uint64(alloc.Resources.DiskMB)
}

// Call stats collect again
diskCleared += alloc.Resources.DiskMB
// Destroy the alloc runner and wait until it exits
a.destroyAllocRunner(ar, fmt.Sprintf("freeing %d MB for new allocations", allocDiskMB))

diskCleared += allocDiskMB
}
return nil
}
@@ -194,7 +194,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_EnoughSpace(t *testing.T)
statsCollector.inodePercents = []float64{0}

alloc := mock.Alloc()
alloc.Resources.DiskMB = 150
alloc.AllocatedResources.Shared.DiskMB = 150
if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}

@@ -233,7 +233,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Partial(t *testing.T) {
statsCollector.inodePercents = []float64{0, 0, 0}

alloc := mock.Alloc()
alloc.Resources.DiskMB = 150
alloc.AllocatedResources.Shared.DiskMB = 150
if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}

@@ -273,7 +273,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_All(t *testing.T) {
statsCollector.inodePercents = []float64{0, 0, 0}

alloc := mock.Alloc()
alloc.Resources.DiskMB = 150
alloc.AllocatedResources.Shared.DiskMB = 150
if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}

@@ -304,7 +304,7 @@ func TestAllocGarbageCollector_MakeRoomForAllocations_GC_Fallback(t *testing.T)
exitAllocRunner(ar1, ar2)

alloc := mock.Alloc()
alloc.Resources.DiskMB = 150
alloc.AllocatedResources.Shared.DiskMB = 150
if err := gc.MakeRoomFor([]*structs.Allocation{alloc}); err != nil {
t.Fatalf("err: %v", err)
}
@@ -739,37 +739,17 @@ func (a *Agent) agentHTTPCheck(server bool) *structs.ServiceCheck {
// reservePortsForClient reserves a range of ports for the client to use when
// it creates various plugins for log collection, executors, drivers, etc
func (a *Agent) reservePortsForClient(conf *clientconfig.Config) error {
// finding the device name for loopback
deviceName, addr, mask, err := a.findLoopbackDevice()
if err != nil {
return fmt.Errorf("error finding the device name for loopback: %v", err)
if conf.Node.ReservedResources == nil {
conf.Node.ReservedResources = &structs.NodeReservedResources{}
}

// seeing if the user has already reserved some resources on this device
var nr *structs.NetworkResource
if conf.Node.Reserved == nil {
conf.Node.Reserved = &structs.Resources{}
res := conf.Node.ReservedResources.Networks.ReservedHostPorts
if res == "" {
res = fmt.Sprintf("%d-%d", conf.ClientMinPort, conf.ClientMaxPort)
} else {
res += fmt.Sprintf(",%d-%d", conf.ClientMinPort, conf.ClientMaxPort)
}
for _, n := range conf.Node.Reserved.Networks {
if n.Device == deviceName {
nr = n
}
}
// If the user hasn't already created the device, we create it
if nr == nil {
nr = &structs.NetworkResource{
Device: deviceName,
IP: addr,
CIDR: mask,
ReservedPorts: make([]structs.Port, 0),
}
}
// appending the port ranges we want to use for the client to the list of
// reserved ports for this device
for i := conf.ClientMinPort; i <= conf.ClientMaxPort; i++ {
nr.ReservedPorts = append(nr.ReservedPorts, structs.Port{Label: fmt.Sprintf("plugin-%d", i), Value: int(i)})
}
conf.Node.Reserved.Networks = append(conf.Node.Reserved.Networks, nr)
conf.Node.ReservedResources.Networks.ReservedHostPorts = res
return nil
}
@@ -546,7 +546,7 @@ type Resources struct {
ReservedPorts string `mapstructure:"reserved_ports"`
}

// CanParseReserved retuns if the reserved ports specification is parsable.
// CanParseReserved returns if the reserved ports specification is parsable.
// The supported syntax is comma separated integers or ranges separated by
// hyphens. For example, "80,120-150,160"
func (r *Resources) CanParseReserved() error {
@@ -84,9 +84,8 @@ func TestConsul_Integration(t *testing.T) {
MBits: 50,
ReservedPorts: []structs.Port{{Label: "http", Value: 3}},
}
alloc.Resources.Networks[0] = netResource
alloc.TaskResources["web"].Networks[0] = netResource
task.Resources.Networks[0] = netResource
alloc.AllocatedResources.Tasks["web"].Networks[0] = netResource

task.Services = []*structs.Service{
{
Name: "httpd",
@@ -42,7 +42,12 @@ func NewTaskServices(alloc *structs.Allocation, task *structs.Task, restarter Ta
DriverNetwork: net,
}

if task.Resources != nil {
if alloc.AllocatedResources != nil {
if tr, ok := alloc.AllocatedResources.Tasks[task.Name]; ok {
ts.Networks = tr.Networks
}
} else if task.Resources != nil {
// COMPAT(0.11): Remove in 0.11
ts.Networks = task.Resources.Networks
}
@@ -30,7 +30,7 @@ func TestDeadlineHeap_WatchAndGet(t *testing.T) {
var batch []string
select {
case batch = <-h.NextBatch():
case <-time.After(testutil.Timeout(2 * wait)):
case <-time.After(testutil.Timeout(3 * wait)):
t.Fatal("timeout")
}
@@ -639,19 +639,12 @@ func (n *nomadFSM) applyAllocUpdate(buf []byte, index uint64) interface{} {
// prior to being inserted into MemDB.
structs.DenormalizeAllocationJobs(req.Job, req.Alloc)

// COMPAT(0.11): Remove in 0.11
// Calculate the total resources of allocations. It is pulled out in the
// payload to avoid encoding something that can be computed, but should be
// denormalized prior to being inserted into MemDB.
for _, alloc := range req.Alloc {
if alloc.Resources != nil {
// COMPAT 0.4.1 -> 0.5
// Set the shared resources for allocations which don't have them
if alloc.SharedResources == nil {
alloc.SharedResources = &structs.Resources{
DiskMB: alloc.Resources.DiskMB,
}
}

continue
}
@@ -1173,6 +1173,7 @@ func TestFSM_UpsertAllocs(t *testing.T) {
fsm := testFSM(t)

alloc := mock.Alloc()
alloc.Resources = &structs.Resources{} // COMPAT(0.11): Remove in 0.11, used to bypass resource creation in state store
fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
req := structs.AllocUpdateRequest{
Alloc: []*structs.Allocation{alloc},

@@ -1231,6 +1232,7 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
fsm := testFSM(t)

alloc := mock.Alloc()
alloc.Resources = &structs.Resources{} // COMPAT(0.11): Remove in 0.11, used to bypass resource creation in state store
fsm.State().UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
job := alloc.Job
alloc.Job = nil

@@ -1260,9 +1262,7 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
// Job should be re-attached
alloc.Job = job
if !reflect.DeepEqual(alloc, out) {
t.Fatalf("bad: %#v %#v", alloc, out)
}
require.Equal(t, alloc, out)

// Ensure that the original job is used
evictAlloc := new(structs.Allocation)

@@ -1299,11 +1299,44 @@ func TestFSM_UpsertAllocs_SharedJob(t *testing.T) {
}
}

// COMPAT(0.11): Remove in 0.11
func TestFSM_UpsertAllocs_StrippedResources(t *testing.T) {
t.Parallel()
fsm := testFSM(t)

alloc := mock.Alloc()
alloc.Resources = &structs.Resources{
CPU: 500,
MemoryMB: 256,
DiskMB: 150,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
},
}
alloc.TaskResources = map[string]*structs.Resources{
"web": {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
},
},
},
}
alloc.SharedResources = &structs.Resources{
DiskMB: 150,
}

// Need to remove mock dynamic port from alloc as it won't be computed
// in this test

@@ -1634,6 +1667,7 @@ func TestFSM_ApplyPlanResults(t *testing.T) {
// Create the request and create a deployment
alloc := mock.Alloc()
alloc.Resources = &structs.Resources{} // COMPAT(0.11): Remove in 0.11, used to bypass resource creation in state store
job := alloc.Job
alloc.Job = nil

@@ -2375,37 +2409,6 @@ func TestFSM_SnapshotRestore_Allocs(t *testing.T) {
}
}

func TestFSM_SnapshotRestore_Allocs_NoSharedResources(t *testing.T) {
t.Parallel()
// Add some state
fsm := testFSM(t)
state := fsm.State()
alloc1 := mock.Alloc()
alloc2 := mock.Alloc()
alloc1.SharedResources = nil
alloc2.SharedResources = nil
state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID))
state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID))
state.UpsertAllocs(1000, []*structs.Allocation{alloc1})
state.UpsertAllocs(1001, []*structs.Allocation{alloc2})

// Verify the contents
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
ws := memdb.NewWatchSet()
out1, _ := state2.AllocByID(ws, alloc1.ID)
out2, _ := state2.AllocByID(ws, alloc2.ID)
alloc1.SharedResources = &structs.Resources{DiskMB: 150}
alloc2.SharedResources = &structs.Resources{DiskMB: 150}

if !reflect.DeepEqual(alloc1, out1) {
t.Fatalf("bad: \n%#v\n%#v", out1, alloc1)
}
if !reflect.DeepEqual(alloc2, out2) {
t.Fatalf("bad: \n%#v\n%#v", out2, alloc2)
}
}

func TestFSM_SnapshotRestore_Indexes(t *testing.T) {
t.Parallel()
// Add some state
@@ -21,18 +21,13 @@ func Node() *structs.Node {
"driver.exec": "1",
"driver.mock_driver": "1",
},

// TODO Remove once clientv2 gets merged
Resources: &structs.Resources{
CPU: 4000,
MemoryMB: 8192,
DiskMB: 100 * 1024,
IOPS: 150,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
},
Reserved: &structs.Resources{
CPU: 100,

@@ -47,6 +42,39 @@ func Node() *structs.Node {
},
},
},

NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 4000,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 8192,
},
Disk: structs.NodeDiskResources{
DiskMB: 100 * 1024,
},
Networks: []*structs.NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
},
ReservedResources: &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: 100,
},
Memory: structs.NodeReservedMemoryResources{
MemoryMB: 256,
},
Disk: structs.NodeReservedDiskResources{
DiskMB: 4 * 1024,
},
Networks: structs.NodeReservedNetworkResources{
ReservedHostPorts: "22",
},
},
Links: map[string]string{
"consul": "foobar.dc1",
},

@@ -376,6 +404,8 @@ func Alloc() *structs.Allocation {
NodeID: "12345678-abcd-efab-cdef-123456789abc",
Namespace: structs.DefaultNamespace,
TaskGroup: "web",

// TODO Remove once clientv2 gets merged
Resources: &structs.Resources{
CPU: 500,
MemoryMB: 256,

@@ -408,6 +438,31 @@ func Alloc() *structs.Allocation {
SharedResources: &structs.Resources{
DiskMB: 150,
},

AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 500,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 256,
},
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
},
},
},
},
Shared: structs.AllocatedSharedResources{
DiskMB: 150,
},
},
Job: Job(),
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -423,51 +478,8 @@ func BatchAlloc() *structs.Allocation {
NodeID: "12345678-abcd-efab-cdef-123456789abc",
Namespace: structs.DefaultNamespace,
TaskGroup: "worker",
Resources: &structs.Resources{
CPU: 500,
MemoryMB: 256,
DiskMB: 150,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
},
},
TaskResources: map[string]*structs.Resources{
"worker": {
CPU: 100,
MemoryMB: 100,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
MBits: 50,
},
},
},
},
SharedResources: &structs.Resources{
DiskMB: 150,
},
Job: BatchJob(),
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
}
alloc.JobID = alloc.Job.ID
return alloc
}

func SystemAlloc() *structs.Allocation {
alloc := &structs.Allocation{
ID: uuid.Generate(),
EvalID: uuid.Generate(),
NodeID: "12345678-abcd-efab-cdef-123456789abc",
Namespace: structs.DefaultNamespace,
TaskGroup: "web",
// TODO Remove once clientv2 gets merged
Resources: &structs.Resources{
CPU: 500,
MemoryMB: 256,

@@ -500,6 +512,105 @@ func SystemAlloc() *structs.Allocation {
SharedResources: &structs.Resources{
DiskMB: 150,
},

AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 500,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 256,
},
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
},
},
},
},
Shared: structs.AllocatedSharedResources{
DiskMB: 150,
},
},
Job: BatchJob(),
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
}
alloc.JobID = alloc.Job.ID
return alloc
}

func SystemAlloc() *structs.Allocation {
alloc := &structs.Allocation{
ID: uuid.Generate(),
EvalID: uuid.Generate(),
NodeID: "12345678-abcd-efab-cdef-123456789abc",
Namespace: structs.DefaultNamespace,
TaskGroup: "web",

// TODO Remove once clientv2 gets merged
Resources: &structs.Resources{
CPU: 500,
MemoryMB: 256,
DiskMB: 150,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http"}},
},
},
},
TaskResources: map[string]*structs.Resources{
"web": {
CPU: 500,
MemoryMB: 256,
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
},
},
},
},
SharedResources: &structs.Resources{
DiskMB: 150,
},

AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 500,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 256,
},
Networks: []*structs.NetworkResource{
{
Device: "eth0",
IP: "192.168.0.100",
ReservedPorts: []structs.Port{{Label: "admin", Value: 5000}},
MBits: 50,
DynamicPorts: []structs.Port{{Label: "http", Value: 9876}},
},
},
},
},
Shared: structs.AllocatedSharedResources{
DiskMB: 150,
},
},
Job: SystemJob(),
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,
|
|||
|
||||
alloc := mock.Alloc()
|
||||
alloc2 := mock.Alloc() // Ensure alloc2 does not fit
|
||||
alloc2.Resources = node2.Resources
|
||||
alloc2.AllocatedResources = structs.NodeResourcesToAllocatedResources(node2.NodeResources)
|
||||
|
||||
// Create a deployment where the allocs are markeda as canaries
|
||||
d := mock.Deployment()
|
||||
|
@ -340,7 +340,7 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
|
|||
|
||||
alloc := mock.Alloc()
|
||||
alloc2 := mock.Alloc() // Ensure alloc2 does not fit
|
||||
alloc2.Resources = node2.Resources
|
||||
alloc2.AllocatedResources = structs.NodeResourcesToAllocatedResources(node2.NodeResources)
|
||||
plan := &structs.Plan{
|
||||
Job: alloc.Job,
|
||||
AllAtOnce: true, // Require all to make progress
|
||||
|
@ -494,9 +494,9 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
|
|||
alloc := mock.Alloc()
|
||||
state := testStateStore(t)
|
||||
node := mock.Node()
|
||||
node.ReservedResources = nil
|
||||
alloc.NodeID = node.ID
|
||||
node.Resources = alloc.Resources
|
||||
node.Reserved = nil
|
||||
alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
|
||||
state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID))
|
||||
state.UpsertNode(1000, node)
|
||||
state.UpsertAllocs(1001, []*structs.Allocation{alloc})
|
||||
|
@ -530,9 +530,10 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
|
|||
alloc := mock.Alloc()
|
||||
state := testStateStore(t)
|
||||
node := mock.Node()
|
||||
alloc.NodeID = node.ID
|
||||
node.Resources = alloc.Resources
|
||||
node.ReservedResources = nil
|
||||
node.Reserved = nil
|
||||
alloc.NodeID = node.ID
|
||||
alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
|
||||
state.UpsertNode(1000, node)
|
||||
state.UpsertAllocs(1001, []*structs.Allocation{alloc})
|
||||
snap, _ := state.Snapshot()
|
||||
|
@ -561,9 +562,9 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
|
|||
alloc := mock.Alloc()
|
||||
state := testStateStore(t)
|
||||
node := mock.Node()
|
||||
node.ReservedResources = nil
|
||||
alloc.NodeID = node.ID
|
||||
node.Resources = alloc.Resources
|
||||
node.Reserved = nil
|
||||
alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
|
||||
state.UpsertNode(1000, node)
|
||||
state.UpsertAllocs(1001, []*structs.Allocation{alloc})
|
||||
snap, _ := state.Snapshot()
|
||||
|
@ -599,10 +600,10 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
|
|||
alloc := mock.Alloc()
|
||||
state := testStateStore(t)
|
||||
node := mock.Node()
|
||||
node.ReservedResources = nil
|
||||
alloc.NodeID = node.ID
|
||||
alloc.DesiredStatus = structs.AllocDesiredStatusEvict
|
||||
node.Resources = alloc.Resources
|
||||
node.Reserved = nil
|
||||
alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
|
||||
state.UpsertNode(1000, node)
|
||||
state.UpsertAllocs(1001, []*structs.Allocation{alloc})
|
||||
snap, _ := state.Snapshot()
|
||||
|
@ -633,8 +634,8 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
|
|||
state := testStateStore(t)
|
||||
node := mock.Node()
|
||||
alloc.NodeID = node.ID
|
||||
node.Resources = alloc.Resources
|
||||
node.Reserved = nil
|
||||
alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)
|
||||
node.ReservedResources = nil
|
||||
node.Status = structs.NodeStatusDown
|
||||
state.UpsertNode(1000, node)
|
||||
state.UpsertAllocs(1001, []*structs.Allocation{alloc})
|
||||
|
|
|
@ -185,6 +185,7 @@ func (s *StateStore) UpsertPlanResults(index uint64, results *structs.ApplyPlanR
|
|||
// being inserted into MemDB.
|
||||
structs.DenormalizeAllocationJobs(results.Job, results.Alloc)
|
||||
|
||||
// COMPAT(0.11): Remove in 0.11
|
||||
// Calculate the total resources of allocations. It is pulled out in the
|
||||
// payload to avoid encoding something that can be computed, but should be
|
||||
// denormalized prior to being inserted into MemDB.
|
||||
|
@ -950,10 +951,6 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b
|
|||
return fmt.Errorf("unable to upsert job into job_version table: %v", err)
|
||||
}
|
||||
|
||||
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
||||
// COMPAT 0.4.1 -> 0.5
|
||||
s.addEphemeralDiskToTaskGroups(job)
|
||||
|
||||
// Insert the job
|
||||
if err := txn.Insert("jobs", job); err != nil {
|
||||
return fmt.Errorf("job insert failed: %v", err)
|
||||
|
@ -2062,12 +2059,6 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation
|
|||
return err
|
||||
}
|
||||
|
||||
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
||||
// COMPAT 0.4.1 -> 0.5
|
||||
if alloc.Job != nil {
|
||||
s.addEphemeralDiskToTaskGroups(alloc.Job)
|
||||
}
|
||||
|
||||
if err := txn.Insert("allocs", alloc); err != nil {
|
||||
return fmt.Errorf("alloc insert failed: %v", err)
|
||||
}
|
||||
|
@ -3861,10 +3852,6 @@ func (r *StateRestore) NodeRestore(node *structs.Node) error {
|
|||
|
||||
// JobRestore is used to restore a job
|
||||
func (r *StateRestore) JobRestore(job *structs.Job) error {
|
||||
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
||||
// COMPAT 0.4.1 -> 0.5
|
||||
r.addEphemeralDiskToTaskGroups(job)
|
||||
|
||||
if err := r.txn.Insert("jobs", job); err != nil {
|
||||
return fmt.Errorf("job insert failed: %v", err)
|
||||
}
|
||||
|
@ -3881,19 +3868,6 @@ func (r *StateRestore) EvalRestore(eval *structs.Evaluation) error {
|
|||
|
||||
// AllocRestore is used to restore an allocation
|
||||
func (r *StateRestore) AllocRestore(alloc *structs.Allocation) error {
|
||||
// Set the shared resources if it's not present
|
||||
// COMPAT 0.4.1 -> 0.5
|
||||
if alloc.SharedResources == nil {
|
||||
alloc.SharedResources = &structs.Resources{
|
||||
DiskMB: alloc.Resources.DiskMB,
|
||||
}
|
||||
}
|
||||
|
||||
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
|
||||
if alloc.Job != nil {
|
||||
r.addEphemeralDiskToTaskGroups(alloc.Job)
|
||||
}
|
||||
|
||||
if err := r.txn.Insert("allocs", alloc); err != nil {
|
||||
return fmt.Errorf("alloc insert failed: %v", err)
|
||||
}
|
||||
|
|
|
@@ -1352,41 +1352,6 @@ func TestStateStore_UpdateUpsertJob_PeriodicJob(t *testing.T) {
}

// This test ensures that UpsertJob creates the EphemeralDisk is a job doesn't have
// one and clear out the task's disk resource asks
// COMPAT 0.4.1 -> 0.5
func TestStateStore_UpsertJob_NoEphemeralDisk(t *testing.T) {
state := testStateStore(t)
job := mock.Job()

// Set the EphemeralDisk to nil and set the tasks's DiskMB to 150
job.TaskGroups[0].EphemeralDisk = nil
job.TaskGroups[0].Tasks[0].Resources.DiskMB = 150

err := state.UpsertJob(1000, job)
if err != nil {
t.Fatalf("err: %v", err)
}

ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}

// Expect the state store to create the EphemeralDisk and clear out Tasks's
// DiskMB
expected := job.Copy()
expected.TaskGroups[0].EphemeralDisk = &structs.EphemeralDisk{
SizeMB: 150,
}
expected.TaskGroups[0].Tasks[0].Resources.DiskMB = 0

if !reflect.DeepEqual(expected, out) {
t.Fatalf("bad: %#v %#v", expected, out)
}
}

func TestStateStore_UpsertJob_BadNamespace(t *testing.T) {
assert := assert.New(t)
state := testStateStore(t)

@@ -2122,45 +2087,6 @@ func TestStateStore_RestoreJob(t *testing.T) {
}
}

// This test ensures that the state restore creates the EphemeralDisk for a job if
// it doesn't have one
// COMPAT 0.4.1 -> 0.5
func TestStateStore_Jobs_NoEphemeralDisk(t *testing.T) {
state := testStateStore(t)
job := mock.Job()

// Set EphemeralDisk to nil and set the DiskMB to 150
job.TaskGroups[0].EphemeralDisk = nil
job.TaskGroups[0].Tasks[0].Resources.DiskMB = 150

restore, err := state.Restore()
if err != nil {
t.Fatalf("err: %v", err)
}

err = restore.JobRestore(job)
if err != nil {
t.Fatalf("err: %v", err)
}
restore.Commit()

ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}

// Expect job to have local disk and clear out the task's disk resource ask
expected := job.Copy()
expected.TaskGroups[0].EphemeralDisk = &structs.EphemeralDisk{
SizeMB: 150,
}
expected.TaskGroups[0].Tasks[0].Resources.DiskMB = 0
if !reflect.DeepEqual(out, expected) {
t.Fatalf("Bad: %#v %#v", out, job)
}
}

func TestStateStore_UpsertPeriodicLaunch(t *testing.T) {
state := testStateStore(t)
job := mock.Job()

@@ -3763,34 +3689,6 @@ func TestStateStore_UpsertAlloc_No_Job(t *testing.T) {
}
}

func TestStateStore_UpsertAlloc_NoEphemeralDisk(t *testing.T) {
state := testStateStore(t)
alloc := mock.Alloc()
alloc.Job.TaskGroups[0].EphemeralDisk = nil
alloc.Job.TaskGroups[0].Tasks[0].Resources.DiskMB = 120

if err := state.UpsertJob(999, alloc.Job); err != nil {
t.Fatalf("err: %v", err)
}

err := state.UpsertAllocs(1000, []*structs.Allocation{alloc})
if err != nil {
t.Fatalf("err: %v", err)
}

ws := memdb.NewWatchSet()
out, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}

expected := alloc.Copy()
expected.Job.TaskGroups[0].EphemeralDisk = &structs.EphemeralDisk{SizeMB: 120}
if !reflect.DeepEqual(expected, out) {
t.Fatalf("bad: %#v %#v", expected, out)
}
}

func TestStateStore_UpsertAlloc_ChildJob(t *testing.T) {
state := testStateStore(t)

@@ -4786,43 +4684,6 @@ func TestStateStore_RestoreAlloc(t *testing.T) {
}
}

func TestStateStore_RestoreAlloc_NoEphemeralDisk(t *testing.T) {
state := testStateStore(t)
alloc := mock.Alloc()
alloc.Job.TaskGroups[0].EphemeralDisk = nil
alloc.Job.TaskGroups[0].Tasks[0].Resources.DiskMB = 120

restore, err := state.Restore()
if err != nil {
t.Fatalf("err: %v", err)
}

err = restore.AllocRestore(alloc)
if err != nil {
t.Fatalf("err: %v", err)
}

restore.Commit()

ws := memdb.NewWatchSet()
out, err := state.AllocByID(ws, alloc.ID)
if err != nil {
t.Fatalf("err: %v", err)
}

expected := alloc.Copy()
expected.Job.TaskGroups[0].EphemeralDisk = &structs.EphemeralDisk{SizeMB: 120}
expected.Job.TaskGroups[0].Tasks[0].Resources.DiskMB = 0

if !reflect.DeepEqual(out, expected) {
t.Fatalf("Bad: %#v %#v", out, expected)
}

if watchFired(ws) {
t.Fatalf("bad")
}
}

func TestStateStore_SetJobStatus_ForceStatus(t *testing.T) {
state := testStateStore(t)
txn := state.db.Txn(true)
|
|||
"version": "0.1.0",
|
||||
"driver.exec": "1",
|
||||
},
|
||||
Resources: &Resources{
|
||||
CPU: 4000,
|
||||
MemoryMB: 8192,
|
||||
DiskMB: 100 * 1024,
|
||||
IOPS: 150,
|
||||
NodeResources: &NodeResources{
|
||||
Cpu: NodeCpuResources{
|
||||
TotalShares: 4000,
|
||||
},
|
||||
Memory: NodeMemoryResources{
|
||||
MemoryMB: 8192,
|
||||
},
|
||||
Disk: NodeDiskResources{
|
||||
DiskMB: 100 * 1024,
|
||||
},
|
||||
Networks: []*NetworkResource{
|
||||
{
|
||||
Device: "eth0",
|
||||
CIDR: "192.168.0.100/32",
|
||||
IP: "192.168.0.100",
|
||||
MBits: 1000,
|
||||
},
|
||||
},
|
||||
|
|
|
@ -1794,12 +1794,7 @@ func (r *Resources) Copy() *Resources {
|
|||
|
||||
// NetIndex finds the matching net index using device name
|
||||
func (r *Resources) NetIndex(n *NetworkResource) int {
|
||||
for idx, net := range r.Networks {
|
||||
if net.Device == n.Device {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return r.Networks.NetIndex(n)
|
||||
}
|
||||
|
||||
// Superset checks if one set of resources is a superset
|
||||
|
@ -1994,6 +1989,15 @@ func (ns Networks) Port(label string) (string, int) {
|
|||
return "", 0
|
||||
}
|
||||
|
||||
func (ns Networks) NetIndex(n *NetworkResource) int {
|
||||
for idx, net := range ns {
|
||||
if net.Device == n.Device {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// NodeResources is used to define the resources available on a client node.
|
||||
type NodeResources struct {
|
||||
Cpu NodeCpuResources
|
||||
|
@ -2335,12 +2339,7 @@ func (a *AllocatedTaskResources) Copy() *AllocatedTaskResources {
|
|||
|
||||
// NetIndex finds the matching net index using device name
|
||||
func (a *AllocatedTaskResources) NetIndex(n *NetworkResource) int {
|
||||
for idx, net := range a.Networks {
|
||||
if net.Device == n.Device {
|
||||
return idx
|
||||
}
|
||||
}
|
||||
return -1
|
||||
return a.Networks.NetIndex(n)
|
||||
}
|
||||
|
||||
func (a *AllocatedTaskResources) Add(delta *AllocatedTaskResources) {
|
||||
|
@ -2432,6 +2431,11 @@ func (c *ComparableAllocatedResources) Superset(other *ComparableAllocatedResour
|
|||
return true, ""
|
||||
}
|
||||
|
||||
// allocated finds the matching net index using device name
|
||||
func (c *ComparableAllocatedResources) NetIndex(n *NetworkResource) int {
|
||||
return c.Flattened.Networks.NetIndex(n)
|
||||
}
|
||||
|
||||
const (
|
||||
// JobTypeNomad is reserved for internal system tasks and is
|
||||
// always handled by the CoreScheduler.
|
||||
|
@ -3932,19 +3936,6 @@ func (tg *TaskGroup) Canonicalize(job *Job) {
|
|||
for _, task := range tg.Tasks {
|
||||
task.Canonicalize(job, tg)
|
||||
}
|
||||
|
||||
// Add up the disk resources to EphemeralDisk. This is done so that users
|
||||
// are not required to move their disk attribute from resources to
|
||||
// EphemeralDisk section of the job spec in Nomad 0.5
|
||||
// COMPAT 0.4.1 -> 0.5
|
||||
// Remove in 0.6
|
||||
var diskMB int
|
||||
for _, task := range tg.Tasks {
|
||||
diskMB += task.Resources.DiskMB
|
||||
}
|
||||
if diskMB > 0 {
|
||||
tg.EphemeralDisk.SizeMB = diskMB
|
||||
}
|
||||
}
|
||||
|
||||
// Validate is used to sanity check a task group
|
||||
|
|
|
@@ -3915,6 +3915,38 @@ func TestNode_Copy(t *testing.T) {
},
},
},
NodeResources: &NodeResources{
Cpu: NodeCpuResources{
TotalShares: 4000,
},
Memory: NodeMemoryResources{
MemoryMB: 8192,
},
Disk: NodeDiskResources{
DiskMB: 100 * 1024,
},
Networks: []*NetworkResource{
{
Device: "eth0",
CIDR: "192.168.0.100/32",
MBits: 1000,
},
},
},
ReservedResources: &NodeReservedResources{
Cpu: NodeReservedCpuResources{
TotalShares: 100,
},
Memory: NodeReservedMemoryResources{
MemoryMB: 256,
},
Disk: NodeReservedDiskResources{
DiskMB: 4 * 1024,
},
Networks: NodeReservedNetworkResources{
ReservedHostPorts: "22",
},
},
Links: map[string]string{
"consul": "foobar.dc1",
},

@@ -0,0 +1,26 @@
package structs

// NodeResourcesToAllocatedResources converts node resources into allocated
// resources. The task name used is "web" and the network is omitted. This is
// useful when trying to make an allocation fill an entire node.
func NodeResourcesToAllocatedResources(n *NodeResources) *AllocatedResources {
if n == nil {
return nil
}

return &AllocatedResources{
Tasks: map[string]*AllocatedTaskResources{
"web": {
Cpu: AllocatedCpuResources{
CpuShares: n.Cpu.TotalShares,
},
Memory: AllocatedMemoryResources{
MemoryMB: n.Memory.MemoryMB,
},
},
},
Shared: AllocatedSharedResources{
DiskMB: n.Disk.DiskMB,
},
}
}
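A minimal usage sketch for the new helper, assuming the mock package already used by these tests; the snippet is illustrative and not part of this diff:

    // Illustrative only: make one allocation consume an entire mock node.
    // The helper keys the resources under the task name "web" and leaves
    // networks empty, so callers that care about networks must set them.
    node := mock.Node()
    alloc := mock.Alloc()
    alloc.NodeID = node.ID
    alloc.AllocatedResources = structs.NodeResourcesToAllocatedResources(node.NodeResources)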

@@ -31,9 +31,13 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -41,9 +45,13 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -58,9 +66,17 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -73,9 +89,17 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
NodeID: nodes[1].Node.ID,
JobID: j2.ID,
Job: j2,
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -92,9 +116,17 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
// Add a planned placement to node1
plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
{
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
},
},
}

@@ -981,7 +981,11 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) {

// Create a full node
node := mock.Node()
node.Reserved = node.Resources
node.ReservedResources = &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: node.NodeResources.Cpu.TotalShares,
},
}
node.ComputeClass()
noErr(t, h.State.UpsertNode(h.NextIndex(), node))

@@ -1508,7 +1512,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {

// Create one node
node := mock.Node()
node.Resources.CPU = 1000
node.NodeResources.Cpu.TotalShares = 1000
noErr(t, h.State.UpsertNode(h.NextIndex(), node))

// Generate a fake job with one allocation

@@ -1523,7 +1527,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
alloc.JobID = job.ID
alloc.NodeID = node.ID
alloc.Name = "my-job.web[0]"
alloc.Resources.CPU = 256
alloc.AllocatedResources.Tasks["web"].Cpu.CpuShares = 256
allocs = append(allocs, alloc)
noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

@@ -1804,24 +1808,41 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) {
func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
h := NewHarness(t)

// Create a node
// Create a node and clear the reserved resources
node := mock.Node()
node.ReservedResources = nil
noErr(t, h.State.UpsertNode(h.NextIndex(), node))

resourceAsk := node.Resources.Copy()
resourceAsk.CPU -= node.Reserved.CPU
resourceAsk.MemoryMB -= node.Reserved.MemoryMB
resourceAsk.DiskMB -= node.Reserved.DiskMB
resourceAsk.Networks = nil
// Create a resource ask that is the same as the resources available on the
// node
cpu := node.NodeResources.Cpu.TotalShares
mem := node.NodeResources.Memory.MemoryMB

request := &structs.Resources{
CPU: int(cpu),
MemoryMB: int(mem),
}
allocated := &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: cpu,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: mem,
},
},
},
}

// Generate a fake job with one alloc that consumes the whole node
job := mock.Job()
job.TaskGroups[0].Count = 1
job.TaskGroups[0].Tasks[0].Resources = resourceAsk
job.TaskGroups[0].Tasks[0].Resources = request
noErr(t, h.State.UpsertJob(h.NextIndex(), job))

alloc := mock.Alloc()
alloc.Resources = resourceAsk
alloc.AllocatedResources = allocated
alloc.Job = job
alloc.JobID = job.ID
alloc.NodeID = node.ID

@@ -1838,7 +1859,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
MinHealthyTime: 10 * time.Second,
HealthyDeadline: 10 * time.Minute,
}
job2.TaskGroups[0].Tasks[0].Resources = mock.Alloc().Resources
job2.TaskGroups[0].Tasks[0].Resources = mock.Job().TaskGroups[0].Tasks[0].Resources

// Update the task, such that it cannot be done in-place
job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"

@@ -1880,7 +1901,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
for _, allocList := range plan.NodeAllocation {
planned = append(planned, allocList...)
}
if len(planned) != 1 {
if len(planned) != 5 {
t.Fatalf("bad: %#v", plan)
}

@@ -1899,7 +1920,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
if !ok {
t.Fatalf("bad: %#v", plan)
}
if state.DesiredTotal != 1 && state.DesiredCanaries != 0 {
if state.DesiredTotal != 5 || state.DesiredCanaries != 0 {
t.Fatalf("bad: %#v", state)
}
}

@@ -31,39 +31,63 @@ func TestBinPackIterator_NoExistingAlloc(t *testing.T) {
{
Node: &structs.Node{
// Perfect fit
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
Reserved: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
ReservedResources: &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: 1024,
},
Memory: structs.NodeReservedMemoryResources{
MemoryMB: 1024,
},
},
},
},
{
Node: &structs.Node{
// Overloaded
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 1024,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 1024,
},
},
Reserved: &structs.Resources{
CPU: 512,
MemoryMB: 512,
ReservedResources: &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: 512,
},
Memory: structs.NodeReservedMemoryResources{
MemoryMB: 512,
},
},
},
},
{
Node: &structs.Node{
// 50% fit
Resources: &structs.Resources{
CPU: 4096,
MemoryMB: 4096,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 4096,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 4096,
},
},
Reserved: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
ReservedResources: &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: 1024,
},
Memory: structs.NodeReservedMemoryResources{
MemoryMB: 1024,
},
},
},
},

@@ -110,9 +134,13 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -120,9 +148,13 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -133,9 +165,17 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
{
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
},
},
}

@@ -143,9 +183,17 @@ func TestBinPackIterator_PlannedAlloc(t *testing.T) {
// Add a planned alloc to node2 that half fills it
plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
{
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
},
},
}

@@ -188,9 +236,13 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -198,9 +250,13 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -216,9 +272,17 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -231,9 +295,17 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
NodeID: nodes[1].Node.ID,
JobID: j2.ID,
Job: j2,
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -279,9 +351,13 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -289,9 +365,13 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
Node: &structs.Node{
// Perfect fit
ID: uuid.Generate(),
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
NodeResources: &structs.NodeResources{
Cpu: structs.NodeCpuResources{
TotalShares: 2048,
},
Memory: structs.NodeMemoryResources{
MemoryMB: 2048,
},
},
},
},

@@ -307,9 +387,17 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
Job: j1,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -322,9 +410,17 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
NodeID: nodes[1].Node.ID,
JobID: j2.ID,
Job: j2,
Resources: &structs.Resources{
CPU: 1024,
MemoryMB: 1024,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 1024,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 1024,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
ClientStatus: structs.AllocClientStatusPending,

@@ -48,13 +48,25 @@ func allocUpdateFnDestructive(*structs.Allocation, *structs.Job, *structs.TaskGr
func allocUpdateFnInplace(existing *structs.Allocation, _ *structs.Job, newTG *structs.TaskGroup) (bool, bool, *structs.Allocation) {
// Create a shallow copy
newAlloc := existing.CopySkipJob()
newAlloc.TaskResources = make(map[string]*structs.Resources)
newAlloc.AllocatedResources = &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{},
Shared: structs.AllocatedSharedResources{
DiskMB: uint64(newTG.EphemeralDisk.SizeMB),
},
}

// Use the new task resources but keep the network from the old
for _, task := range newTG.Tasks {
r := task.Resources.Copy()
r.Networks = existing.TaskResources[task.Name].Networks
newAlloc.TaskResources[task.Name] = r
networks := existing.AllocatedResources.Tasks[task.Name].Copy().Networks
newAlloc.AllocatedResources.Tasks[task.Name] = &structs.AllocatedTaskResources{
Cpu: structs.AllocatedCpuResources{
CpuShares: uint64(task.Resources.CPU),
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: uint64(task.Resources.MemoryMB),
},
Networks: networks,
}
}

return false, false, newAlloc
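The Resources-to-AllocatedTaskResources conversion above (CPU shares from Resources.CPU, memory from Resources.MemoryMB, networks carried over from the existing allocation) recurs in several of these test fixtures. As a hypothetical standalone helper, not part of this commit, it would look roughly like:

    // taskResourcesToAllocated is a hypothetical helper shown only to
    // illustrate the conversion pattern used in allocUpdateFnInplace above.
    func taskResourcesToAllocated(r *structs.Resources, networks structs.Networks) *structs.AllocatedTaskResources {
        return &structs.AllocatedTaskResources{
            Cpu: structs.AllocatedCpuResources{
                CpuShares: uint64(r.CPU),
            },
            Memory: structs.AllocatedMemoryResources{
                MemoryMB: uint64(r.MemoryMB),
            },
            Networks: networks,
        }
    }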

@@ -279,7 +279,11 @@ func TestServiceStack_Select_BinPack_Overflow(t *testing.T) {
}
zero := nodes[0]
one := nodes[1]
one.Reserved = one.Resources
one.ReservedResources = &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: one.NodeResources.Cpu.TotalShares,
},
}

stack := NewGenericStack(false, ctx)
stack.SetNodes(nodes)

@@ -493,7 +497,11 @@ func TestSystemStack_Select_BinPack_Overflow(t *testing.T) {
mock.Node(),
}
zero := nodes[0]
zero.Reserved = zero.Resources
zero.ReservedResources = &structs.NodeReservedResources{
Cpu: structs.NodeReservedCpuResources{
TotalShares: zero.NodeResources.Cpu.TotalShares,
},
}
one := nodes[1]

stack := NewSystemStack(ctx)

@@ -529,8 +529,17 @@ func inplaceUpdate(ctx Context, eval *structs.Evaluation, job *structs.Job,
// to be updated. This is guarded in taskUpdated, so we can
// safely restore those here.
for task, resources := range option.TaskResources {
existing := update.Alloc.TaskResources[task]
resources.Networks = existing.Networks
var networks structs.Networks
if update.Alloc.AllocatedResources != nil {
if tr, ok := update.Alloc.AllocatedResources.Tasks[task]; ok {
networks = tr.Networks
}
} else if tr, ok := update.Alloc.TaskResources[task]; ok {
networks = tr.Networks
}

// Add the networks back
resources.Networks = networks
}

// Create a shallow copy

@@ -796,8 +805,17 @@ func genericAllocUpdateFn(ctx Context, stack Stack, evalID string) allocUpdateTy
// to be updated. This is guarded in taskUpdated, so we can
// safely restore those here.
for task, resources := range option.TaskResources {
existingResources := existing.TaskResources[task]
resources.Networks = existingResources.Networks
var networks structs.Networks
if existing.AllocatedResources != nil {
if tr, ok := existing.AllocatedResources.Tasks[task]; ok {
networks = tr.Networks
}
} else if tr, ok := existing.TaskResources[task]; ok {
networks = tr.Networks
}

// Add the networks back
resources.Networks = networks
}

// Create a shallow copy
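Both inplaceUpdate and genericAllocUpdateFn now restore networks with the same fallback: prefer AllocatedResources when the allocation carries it, otherwise read the legacy TaskResources map. Extracted as a hypothetical helper, not part of this change, the lookup reads roughly:

    // networksForTask is a hypothetical helper illustrating the fallback
    // used above; it is not part of this commit.
    func networksForTask(alloc *structs.Allocation, task string) structs.Networks {
        if alloc.AllocatedResources != nil {
            if tr, ok := alloc.AllocatedResources.Tasks[task]; ok {
                return tr.Networks
            }
            return nil
        }
        if tr, ok := alloc.TaskResources[task]; ok {
            return tr.Networks
        }
        return nil
    }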

@@ -736,9 +736,17 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
NodeID: node.ID,
JobID: job.ID,
Job: job,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
TaskGroup: "web",

@@ -785,9 +793,17 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
NodeID: node.ID,
JobID: job.ID,
Job: job,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
TaskGroup: "web",

@@ -834,9 +850,17 @@ func TestInplaceUpdate_Success(t *testing.T) {
JobID: job.ID,
Job: job,
TaskGroup: job.TaskGroups[0].Name,
Resources: &structs.Resources{
CPU: 2048,
MemoryMB: 2048,
AllocatedResources: &structs.AllocatedResources{
Tasks: map[string]*structs.AllocatedTaskResources{
"web": {
Cpu: structs.AllocatedCpuResources{
CpuShares: 2048,
},
Memory: structs.AllocatedMemoryResources{
MemoryMB: 2048,
},
},
},
},
DesiredStatus: structs.AllocDesiredStatusRun,
}