e03c5a43be

* Added to subnav and basic table implemented
* Existing services become service fragments, and services tab aggregated beneath job route
* Index page within jobs/job/services
* Watchable services
* Lintfixes
* Links to clients and individual services set up
* Child service route
* Keyboard shortcuts on service page
* Model that shows Consul services as well, plus level and provider cols
* Lintfix
* Level as query param
* Watch job for service name changes too
* Group-level service fixtures established
* Progress at task level and job-linked services
* Task and group services on update
* Fixture side-effect cleanup
* Basic acceptance tests for job services
* Test model cleanup
* Disabled mirage logging
* New cluster type specifically for services
* Without explicit job-model binding
* Trying to isolate a toString error
* Account for new tab in keyboard nav
* More test isolation attempts
* Remove skipped tests and link task to parent group by id

ui: add service health viz to table (#14369)

* ui: add service-status-bar
* test: service-status-bar
* refact: update component api for new data struct
* ui: format service health struct
* ui: add service health viz to table
* temp: add placeholder to remind conditional watcher
* test: write tests for transformation algorithm
* refact: update transformation algo
* ui: conditionally long-poll checks endpoint
* refact: add conditional logic for nomad provider
* refact: update service-fragment model to include owner info
* ui: differentiate between task and group-level in derived state comp
* test: add test to document behavior
* refact: update tests for api change
* refact: update integration test for API change
* chore: remove unused vars
* chore: elvis operator to protect mirage
* refact: create refId instead of internalModel
* refact: update algo
* refact: update conditional template logic
* refact: update test for api change
* chore: can't use if and not in hbs conditional
251 lines · 6.9 KiB · JavaScript
import { Factory, trait } from 'ember-cli-mirage';
import faker from 'nomad-ui/mirage/faker';
import { provide } from '../utils';
import { generateResources } from '../common';
import { dasherize } from '@ember/string';

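// Possible ephemeral disk sizes, in MB, used for ephemeralDisk.SizeMB below.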
const DISK_RESERVATIONS = [200, 500, 1000, 2000, 5000, 10000, 100000];

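// Usage sketch (assumes this factory is registered as 'task-group'; traits
// and directives are passed at create time):
//   server.create('task-group', 'noHostVolumes', {
//     withServices: true,
//     resourceSpec: 'M: 512, C: 1000',
//   });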
export default Factory.extend({
  name: (id) => `${dasherize(faker.hacker.noun())}-g-${id}`,
  count: () => faker.random.number({ min: 1, max: 2 }),

  ephemeralDisk: () => ({
    Sticky: faker.random.boolean(),
    SizeMB: faker.helpers.randomize(DISK_RESERVATIONS),
    Migrate: faker.random.boolean(),
  }),

  noHostVolumes: trait({
    volumes: () => ({}),
  }),

  withScaling: faker.random.boolean,

  volumes: makeHostVolumes(),

  // Directive used to control whether or not allocations are automatically
  // created.
  createAllocations: true,

  // Directive used to control whether or not the allocation should fail
  // and reschedule, creating reschedule events.
  withRescheduling: false,

  // Directive used to control whether the task group should have services.
  withServices: false,

  // Whether the tasks themselves should have services.
  withTaskServices: false,

  // Directive used to control whether dynamic application sizing
  // recommendations should be created.
  createRecommendations: false,

  // When true, only creates allocations (no tasks).
  shallow: false,

  // When set, passed into tasks to set resource values.
  resourceSpec: null,

  afterCreate(group, server) {
    let taskIds = [];
    let volumes = Object.keys(group.volumes);

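    // When scaling is enabled, attach Min/Max plus (randomly) a
    // Prometheus-backed target-value policy.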
    if (group.withScaling) {
      group.update({
        scaling: {
          Min: 1,
          Max: 5,
          Policy: faker.random.boolean() && {
            EvaluationInterval: '10s',
            Cooldown: '2m',
            Check: {
              avg_conn: {
                Source: 'prometheus',
                Query:
                  'scalar(avg((haproxy_server_current_sessions{backend="http_back"}) and (haproxy_server_up{backend="http_back"} == 1)))',
                Strategy: {
                  'target-value': {
                    target: 20,
                  },
                },
              },
            },
          },
        },
      });
    }

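    // Unless the group is shallow, create one task per count, dividing any
    // parsed resourceSpec across the tasks.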
    if (!group.shallow) {
      const resources =
        group.resourceSpec &&
        divide(group.count, parseResourceSpec(group.resourceSpec));
      const tasks = provide(group.count, (_, idx) => {
        const mounts = faker.helpers
          .shuffle(volumes)
          .slice(0, faker.random.number({ min: 1, max: 3 }));

        const maybeResources = {};
        if (resources) {
          maybeResources.originalResources = generateResources(resources[idx]);
        }
        return server.create('task', {
          taskGroupID: group.id,
          ...maybeResources,
          withServices: group.withTaskServices,
          volumeMounts: mounts.map((mount) => ({
            Volume: mount,
            Destination: `/${faker.internet.userName()}/${faker.internet.domainWord()}/${faker.internet.color()}`,
            PropagationMode: '',
            ReadOnly: faker.random.boolean(),
          })),
          createRecommendations: group.createRecommendations,
        });
      });
      taskIds = tasks.mapBy('id');
    }

    group.update({
      taskIds: taskIds,
    });

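    // One allocation per group instance; with rescheduling enabled, the
    // allocation is created with reschedule attempts and outcomes.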
    if (group.createAllocations) {
      Array(group.count)
        .fill(null)
        .forEach((_, i) => {
          const props = {
            jobId: group.job.id,
            namespace: group.job.namespace,
            taskGroup: group.name,
            name: `${group.name}.[${i}]`,
            rescheduleSuccess: group.withRescheduling
              ? faker.random.boolean()
              : null,
            rescheduleAttempts: group.withRescheduling
              ? faker.random.number({ min: 1, max: 5 })
              : 0,
          };

          if (group.withRescheduling) {
            server.create('allocation', 'rescheduled', props);
          } else {
            server.create('allocation', props);
          }
        });
    }

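    // Group-level services: five 'nomad'-provider fragments plus one
    // 'consul'-provider fragment.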
    if (group.withServices) {
      const services = server.createList('service-fragment', 5, {
        taskGroupId: group.id,
        taskGroup: group,
        provider: 'nomad',
      });

      services.push(
        server.create('service-fragment', {
          taskGroupId: group.id,
          taskGroup: group,
          provider: 'consul',
        })
      );

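      // Register three service instances for each fragment.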
      services.forEach((fragment) => {
        for (let i = 0; i < 3; i++) {
          server.create('service', {
            serviceName: fragment.name,
            id: `${faker.internet.domainWord()}-group-${fragment.name}`,
          });
        }
      });

      group.update({
        services,
      });
    }
  },
});

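// Builds a randomized map of 1-5 host volumes, keyed by volume name.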
function makeHostVolumes() {
  const generate = () => ({
    Name: faker.internet.domainWord(),
    Type: 'host',
    Source: faker.internet.domainWord(),
    ReadOnly: faker.random.boolean(),
  });

  const volumes = provide(faker.random.number({ min: 1, max: 5 }), generate);
  return volumes.reduce((hash, volume) => {
    hash[volume.Name] = volume;
    return hash;
  }, {});
}

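// Parses a compact resource spec string into a resources object.
// ex: parseResourceSpec('M: 256, C: 500') -> { MemoryMB: 256, CPU: 500 }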
function parseResourceSpec(spec) {
  const mapping = {
    M: 'MemoryMB',
    C: 'CPU',
    D: 'DiskMB',
    I: 'IOPS',
  };

  const terms = spec.split(',').map((t) => {
    const [k, v] = t
      .trim()
      .split(':')
      .map((kv) => kv.trim());
    return [k, +v];
  });

  return terms.reduce((hash, term) => {
    hash[mapping[term[0]]] = term[1];
    return hash;
  }, {});
}

// Split a single resources object into N resource objects where
// the sum of each property of the new resources objects equals
// the original resources properties.
// ex: divide(2, { Mem: 400, Cpu: 250 }) -> [{ Mem: 80, Cpu: 50 }, { Mem: 320, Cpu: 200 }]
function divide(count, resources) {
  const wheel = roulette(1, count);

  const ret = provide(count, (_, idx) => {
    return Object.keys(resources).reduce((hash, key) => {
      hash[key] = Math.round(resources[key] * wheel[idx]);
      return hash;
    }, {});
  });

  return ret;
}

// Roulette splits a number into N divisions.
// Variance is a value between 0 and 1 that determines how much each division
// can vary in size. At 0 each division is even; at 1 it's entirely random, but
// the sum of all divisions is guaranteed to equal the total value.
function roulette(total, divisions, variance = 0.8) {
  let roulette = new Array(divisions).fill(total / divisions);
  roulette.forEach((v, i) => {
    if (i === roulette.length - 1) return;
    roulette.splice(
      i,
      2,
      ...rngDistribute(roulette[i], roulette[i + 1], variance)
    );
  });
  return roulette;
}

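// Moves a random share (up to `variance`) of `a` over to `b`, preserving
// the sum of the pair.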
function rngDistribute(a, b, variance = 0.8) {
  const move =
    a * faker.random.number({ min: 0, max: variance, precision: 0.01 });
  return [a - move, b + move];
}