Migrate to the new resources properties for allocs and nodes

Michael Lange 2020-10-12 15:26:54 -07:00
parent b2b7d5e19e
commit 64fc738733
14 changed files with 107 additions and 77 deletions

View File

@ -9,6 +9,21 @@ const taskGroupFromJob = (job, taskGroupName) => {
return taskGroup ? taskGroup : null;
};
const merge = tasks => {
const mergedResources = {
Cpu: { CpuShares: 0 },
Memory: { MemoryMB: 0 },
Disk: { DiskMB: 0 },
};
return tasks.reduce((resources, task) => {
resources.Cpu.CpuShares += (task.Cpu && task.Cpu.CpuShares) || 0;
resources.Memory.MemoryMB += (task.Memory && task.Memory.MemoryMB) || 0;
resources.Disk.DiskMB += (task.Disk && task.Disk.DiskMB) || 0;
return resources;
}, mergedResources);
};
@classic
export default class AllocationSerializer extends ApplicationSerializer {
@service system;
@ -30,7 +45,7 @@ export default class AllocationSerializer extends ApplicationSerializer {
const state = states[key] || {};
const summary = { Name: key };
Object.keys(state).forEach(stateKey => (summary[stateKey] = state[stateKey]));
summary.Resources = hash.TaskResources && hash.TaskResources[key];
summary.Resources = hash.AllocatedResources && hash.AllocatedResources.Tasks[key];
return summary;
});
@ -57,8 +72,13 @@ export default class AllocationSerializer extends ApplicationSerializer {
hash.PreemptedByAllocationID = hash.PreemptedByAllocation || null;
hash.WasPreempted = !!hash.PreemptedByAllocationID;
// When present, the resources are nested under AllocatedResources.Shared
hash.AllocatedResources = hash.AllocatedResources && hash.AllocatedResources.Shared;
const shared = hash.AllocatedResources && hash.AllocatedResources.Shared;
hash.AllocatedResources =
hash.AllocatedResources && merge(Object.values(hash.AllocatedResources.Tasks));
if (shared) {
hash.AllocatedResources.Ports = shared.Ports;
hash.AllocatedResources.Networks = shared.Networks;
}
// The Job definition for an allocation is only included in findRecord responses.
hash.AllocationTaskGroup = !hash.Job ? null : taskGroupFromJob(hash.Job, hash.TaskGroup);
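
For reference, the gist of this change: allocation resources now arrive nested under `AllocatedResources`, with per-task figures under `Tasks` and ports/networks under `Shared`, and the serializer folds them back into a single summary using the `merge` helper above. A standalone sketch of that reshaping, with invented task names and values:

```js
// The merge helper is copied from the serializer above; the payload values are invented.
const merge = tasks => {
  const mergedResources = {
    Cpu: { CpuShares: 0 },
    Memory: { MemoryMB: 0 },
    Disk: { DiskMB: 0 },
  };
  return tasks.reduce((resources, task) => {
    resources.Cpu.CpuShares += (task.Cpu && task.Cpu.CpuShares) || 0;
    resources.Memory.MemoryMB += (task.Memory && task.Memory.MemoryMB) || 0;
    resources.Disk.DiskMB += (task.Disk && task.Disk.DiskMB) || 0;
    return resources;
  }, mergedResources);
};

// Hypothetical allocation payload fragment.
const hash = {
  AllocatedResources: {
    Shared: { Ports: [{ Label: 'http', Value: 23456 }], Networks: [] },
    Tasks: {
      web: { Cpu: { CpuShares: 500 }, Memory: { MemoryMB: 256 }, Disk: { DiskMB: 0 } },
      sidecar: { Cpu: { CpuShares: 100 }, Memory: { MemoryMB: 128 }, Disk: { DiskMB: 0 } },
    },
  },
};

const shared = hash.AllocatedResources && hash.AllocatedResources.Shared;
hash.AllocatedResources =
  hash.AllocatedResources && merge(Object.values(hash.AllocatedResources.Tasks));
if (shared) {
  hash.AllocatedResources.Ports = shared.Ports;
  hash.AllocatedResources.Networks = shared.Networks;
}

// hash.AllocatedResources is now:
// { Cpu: { CpuShares: 600 }, Memory: { MemoryMB: 384 }, Disk: { DiskMB: 0 },
//   Ports: [{ Label: 'http', Value: 23456 }], Networks: [] }
```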

View File

@ -7,6 +7,8 @@ export default class NodeSerializer extends ApplicationSerializer {
attrs = {
isDraining: 'Drain',
httpAddr: 'HTTPAddr',
resources: 'NodeResources',
reserved: 'ReservedResources',
};
mapToArray = ['Drivers', 'HostVolumes'];
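
For context, `attrs` is Ember Data's standard payload-key remapping, so the node model's `resources` and `reserved` attributes are now populated from the `NodeResources` and `ReservedResources` keys of the node payload. An invented payload fragment, just to show which keys feed which attributes:

```js
// Invented node payload fragment; only the key names matter here.
const nodePayload = {
  Drain: false,               // -> isDraining
  HTTPAddr: '10.0.0.10:4646', // -> httpAddr
  NodeResources: { /* ... */ },     // -> resources
  ReservedResources: { /* ... */ }, // -> reserved
};
```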

View File

@ -1,12 +1,21 @@
import ApplicationSerializer from './application';
export default class ResourcesSerializer extends ApplicationSerializer {
attrs = {
cpu: 'CPU',
memory: 'MemoryMB',
disk: 'DiskMB',
iops: 'IOPS',
};
arrayNullOverrides = ['Ports', 'Networks'];
arrayNullOverrides = ['Ports'];
normalize(typeHash, hash) {
hash.Cpu = hash.Cpu && hash.Cpu.CpuShares;
hash.Memory = hash.Memory && hash.Memory.MemoryMB;
hash.Disk = hash.Disk && hash.Disk.DiskMB;
// Networks for ReservedResources is different than for Resources.
// This smooths over the differences, but doesn't actually support
// anything in the ReservedResources.Networks object, since we don't
// use any of it in the UI.
if (!(hash.Networks instanceof Array)) {
hash.Networks = [];
}
return super.normalize(...arguments);
}
}
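
The new `normalize` hook flattens the nested resource objects into plain numbers and coerces `Networks` into an array before the rest of normalization runs. A rough sketch of the transformation on an invented hash:

```js
// Invented input hash; the flattening mirrors the normalize hook above.
const hash = {
  Cpu: { CpuShares: 500 },
  Memory: { MemoryMB: 256 },
  Disk: { DiskMB: 300 },
  Networks: {}, // ReservedResources.Networks is an object rather than an array
};

hash.Cpu = hash.Cpu && hash.Cpu.CpuShares;         // 500
hash.Memory = hash.Memory && hash.Memory.MemoryMB; // 256
hash.Disk = hash.Disk && hash.Disk.DiskMB;         // 300
if (!(hash.Networks instanceof Array)) {
  hash.Networks = []; // non-array Networks values are replaced with an empty array
}
```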

View File

@ -5,10 +5,8 @@ import { provide } from './utils';
const CPU_RESERVATIONS = [250, 500, 1000, 2000, 2500, 4000];
const MEMORY_RESERVATIONS = [256, 512, 1024, 2048, 4096, 8192];
const DISK_RESERVATIONS = [200, 500, 1000, 2000, 5000, 10000, 100000];
const IOPS_RESERVATIONS = [100000, 250000, 500000, 1000000, 10000000, 20000000];
// There is also a good chance that certain resource restrictions are unbounded
IOPS_RESERVATIONS.push(...Array(1000).fill(0));
DISK_RESERVATIONS.push(...Array(500).fill(0));
const NETWORK_MODES = ['bridge', 'host'];
@ -27,10 +25,15 @@ export const STORAGE_PROVIDERS = ['ebs', 'zfs', 'nfs', 'cow', 'moo'];
export function generateResources(options = {}) {
return {
CPU: options.CPU || faker.helpers.randomize(CPU_RESERVATIONS),
MemoryMB: options.MemoryMB || faker.helpers.randomize(MEMORY_RESERVATIONS),
DiskMB: options.DiskMB || faker.helpers.randomize(DISK_RESERVATIONS),
IOPS: options.IOPS || faker.helpers.randomize(IOPS_RESERVATIONS),
Cpu: {
CpuShares: options.CPU || faker.helpers.randomize(CPU_RESERVATIONS),
},
Memory: {
MemoryMB: options.MemoryMB || faker.helpers.randomize(MEMORY_RESERVATIONS),
},
Disk: {
DiskMB: options.DiskMB || faker.helpers.randomize(DISK_RESERVATIONS),
},
Networks: generateNetworks(options.networks),
Ports: generatePorts(options.networks),
};
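
With the factory now emitting the new nested shape, a call like the one below produces roughly the following (`Networks` and `Ports` still come from the faker-driven generators, so their contents vary):

```js
// Illustrative call; explicit CPU/MemoryMB/DiskMB values short-circuit the randomization.
const resources = generateResources({ CPU: 500, MemoryMB: 256, DiskMB: 300 });
// resources is roughly:
// {
//   Cpu: { CpuShares: 500 },
//   Memory: { MemoryMB: 256 },
//   Disk: { DiskMB: 300 },
//   Networks: [ /* generateNetworks(undefined) */ ],
//   Ports: [ /* generatePorts(undefined) */ ],
// }
```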

View File

@ -42,15 +42,16 @@ export default Factory.extend({
const taskGroup = server.db.taskGroups.findBy({ name: allocation.taskGroup });
const resources = taskGroup.taskIds.map(id => {
const task = server.db.tasks.find(id);
return server.create(
'task-resource',
{
allocation,
name: task.name,
resources: task.Resources,
},
'withReservedPorts'
);
return server.create('task-resource', {
allocation,
name: task.name,
resources: generateResources({
CPU: task.resources.CPU,
MemoryMB: task.resources.MemoryMB,
DiskMB: task.resources.DiskMB,
networks: { minPorts: 1 },
}),
});
});
allocation.update({ taskResourceIds: resources.mapBy('id') });
@ -62,29 +63,22 @@ export default Factory.extend({
const taskGroup = server.db.taskGroups.findBy({ name: allocation.taskGroup });
const resources = taskGroup.taskIds.map(id => {
const task = server.db.tasks.find(id);
return server.create(
'task-resource',
{
allocation,
name: task.name,
resources: task.Resources,
},
'withoutReservedPorts'
);
return server.create('task-resource', {
allocation,
name: task.name,
resources: generateResources({
CPU: task.resources.CPU,
MemoryMB: task.resources.MemoryMB,
DiskMB: task.resources.DiskMB,
networks: { minPorts: 0, maxPorts: 0 },
}),
});
});
allocation.update({ taskResourceIds: resources.mapBy('id') });
},
}),
withAllocatedResources: trait({
allocatedResources: () => {
return {
Shared: generateResources({ networks: { minPorts: 2 } }),
};
},
}),
rescheduleAttempts: 0,
rescheduleSuccess: false,
@ -200,7 +194,7 @@ export default Factory.extend({
return server.create('task-resource', {
allocation,
name: task.name,
resources: task.Resources,
resources: task.originalResources,
});
});

View File

@ -74,7 +74,7 @@ export default Factory.extend({
hostVolumes: makeHostVolumes,
resources: generateResources,
nodeResources: generateResources,
attributes() {
// TODO add variability to these

View File

@ -5,12 +5,4 @@ export default Factory.extend({
name: () => '!!!this should be set by the allocation that owns this task state!!!',
resources: generateResources,
withReservedPorts: trait({
resources: () => generateResources({ networks: { minPorts: 1 } }),
}),
withoutReservedPorts: trait({
resources: () => generateResources({ networks: { minPorts: 0, maxPorts: 0 } }),
}),
});

View File

@ -16,7 +16,17 @@ export default Factory.extend({
name: id => `task-${faker.hacker.noun().dasherize()}-${id}`,
driver: () => faker.helpers.randomize(DRIVERS),
Resources: generateResources,
originalResources: generateResources,
resources: function() {
// Generate resources the usual way, but transform to the old
// shape because that's what the job spec uses.
const resources = this.originalResources;
return {
CPU: resources.Cpu.CpuShares,
MemoryMB: resources.Memory.MemoryMB,
DiskMB: resources.Disk.DiskMB,
};
},
Lifecycle: i => {
const cycle = i % 5;
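
In other words, the task factory keeps the new nested shape on `originalResources` and derives the old flat job-spec shape from it for `resources`. A small sketch of that derivation with invented numbers:

```js
// Invented example of the resources() derivation above.
const originalResources = {
  Cpu: { CpuShares: 1000 },
  Memory: { MemoryMB: 512 },
  Disk: { DiskMB: 200 },
};

const resources = {
  CPU: originalResources.Cpu.CpuShares,        // 1000
  MemoryMB: originalResources.Memory.MemoryMB, // 512
  DiskMB: originalResources.Disk.DiskMB,       // 200
};
```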

View File

@ -18,14 +18,14 @@ export default ApplicationSerializer.extend({
function serializeAllocation(allocation) {
allocation.TaskStates = allocation.TaskStates.reduce(arrToObj('Name'), {});
allocation.Resources = allocation.TaskResources.mapBy('Resources').reduce(
(hash, resources) => {
['CPU', 'DiskMB', 'IOPS', 'MemoryMB'].forEach(key => (hash[key] += resources[key]));
hash.Networks = resources.Networks;
hash.Ports = resources.Ports;
return hash;
},
{ CPU: 0, DiskMB: 0, IOPS: 0, MemoryMB: 0 }
);
allocation.TaskResources = allocation.TaskResources.reduce(arrToObj('Name', 'Resources'), {});
const { Ports, Networks } = allocation.TaskResources[0]
? allocation.TaskResources[0].Resources
: {};
allocation.AllocatedResources = {
Shared: { Ports, Networks },
Tasks: allocation.TaskResources.map(({ Name, Resources }) => ({ Name, ...Resources })).reduce(
arrToObj('Name'),
{}
),
};
}
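
The Mirage serializer now assembles an `AllocatedResources` object shaped like the real API response: shared ports and networks taken from the first task's resources, plus per-task resources keyed by task name. A rough sketch with one invented task record and a simplified stand-in for the `arrToObj` helper (the real helper may differ in detail):

```js
// Simplified stand-in for arrToObj: keys an array of objects by a property.
const arrToObj = key => (obj, item) => {
  obj[item[key]] = item;
  return obj;
};

// One invented task-resource record as embedded on the serialized allocation.
const TaskResources = [
  { Name: 'web', Resources: { Cpu: { CpuShares: 500 }, Ports: [], Networks: [] } },
];

const { Ports, Networks } = TaskResources[0] ? TaskResources[0].Resources : {};
const AllocatedResources = {
  Shared: { Ports, Networks },
  Tasks: TaskResources.map(({ Name, Resources }) => ({ Name, ...Resources })).reduce(
    arrToObj('Name'),
    {}
  ),
};

// AllocatedResources is roughly:
// {
//   Shared: { Ports: [], Networks: [] },
//   Tasks: { web: { Name: 'web', Cpu: { CpuShares: 500 }, Ports: [], Networks: [] } },
// }
```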

View File

@ -26,7 +26,7 @@ module('Acceptance | allocation detail', function(hooks) {
withGroupServices: true,
createAllocations: false,
});
allocation = server.create('allocation', 'withTaskWithPorts', 'withAllocatedResources', {
allocation = server.create('allocation', 'withTaskWithPorts', {
clientStatus: 'running',
});
@ -87,7 +87,7 @@ module('Acceptance | allocation detail', function(hooks) {
createAllocations: false,
});
const allocation = server.create('allocation', 'withTaskWithPorts', 'withAllocatedResources', {
const allocation = server.create('allocation', 'withTaskWithPorts', {
clientStatus: 'running',
jobId: job.id,
});
@ -188,7 +188,7 @@ module('Acceptance | allocation detail', function(hooks) {
createAllocations: false,
});
allocation = server.create('allocation', 'withTaskWithPorts', 'withAllocatedResources', {
allocation = server.create('allocation', 'withTaskWithPorts', {
clientStatus: 'running',
jobId: job.id,
});
@ -216,7 +216,7 @@ module('Acceptance | allocation detail', function(hooks) {
});
test('ports are listed', async function(assert) {
const allServerPorts = allocation.allocatedResources.Shared.Ports;
const allServerPorts = allocation.taskResources.models[0].resources.Ports;
allServerPorts.sortBy('Label').forEach((serverPort, index) => {
const renderedPort = Allocation.ports[index];

View File

@ -134,8 +134,8 @@ module('Acceptance | client detail', function(hooks) {
});
const tasks = taskGroup.taskIds.map(id => server.db.tasks.find(id));
const cpuUsed = tasks.reduce((sum, task) => sum + task.Resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.Resources.MemoryMB, 0);
const cpuUsed = tasks.reduce((sum, task) => sum + task.resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.resources.MemoryMB, 0);
await ClientDetail.visit({ id: node.id });

View File

@ -94,8 +94,8 @@ module('Acceptance | plugin detail', function(hooks) {
});
const tasks = taskGroup.taskIds.map(id => server.db.tasks.find(id));
const cpuUsed = tasks.reduce((sum, task) => sum + task.Resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.Resources.MemoryMB, 0);
const cpuUsed = tasks.reduce((sum, task) => sum + task.resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.resources.MemoryMB, 0);
await PluginDetail.visit({ id: plugin.id });

View File

@ -74,8 +74,8 @@ module('Acceptance | task group detail', function(hooks) {
});
test('/jobs/:id/:task-group should list high-level metrics for the allocation', async function(assert) {
const totalCPU = tasks.mapBy('Resources.CPU').reduce(sum, 0);
const totalMemory = tasks.mapBy('Resources.MemoryMB').reduce(sum, 0);
const totalCPU = tasks.mapBy('resources.CPU').reduce(sum, 0);
const totalMemory = tasks.mapBy('resources.MemoryMB').reduce(sum, 0);
const totalDisk = taskGroup.ephemeralDisk.SizeMB;
await TaskGroup.visit({ id: job.id, name: taskGroup.name });
@ -199,8 +199,8 @@ module('Acceptance | task group detail', function(hooks) {
const allocStats = server.db.clientAllocationStats.find(allocation.id);
const tasks = taskGroup.taskIds.map(id => server.db.tasks.find(id));
const cpuUsed = tasks.reduce((sum, task) => sum + task.Resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.Resources.MemoryMB, 0);
const cpuUsed = tasks.reduce((sum, task) => sum + task.resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.resources.MemoryMB, 0);
assert.equal(
allocationRow.cpu,

View File

@ -106,8 +106,8 @@ module('Acceptance | volume detail', function(hooks) {
});
const tasks = taskGroup.taskIds.map(id => server.db.tasks.find(id));
const cpuUsed = tasks.reduce((sum, task) => sum + task.Resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.Resources.MemoryMB, 0);
const cpuUsed = tasks.reduce((sum, task) => sum + task.resources.CPU, 0);
const memoryUsed = tasks.reduce((sum, task) => sum + task.resources.MemoryMB, 0);
await VolumeDetail.visit({ id: volume.id });