Merge pull request #3979 from hashicorp/b_update_compat_delete

Delete compatibility code for job level update stanza
Preetha 2018-03-21 09:17:01 -05:00 committed by GitHub
commit 17f2f52f08
2 changed files with 0 additions and 419 deletions

@@ -1814,85 +1814,6 @@ func (j *Job) Canonicalize() (warnings error) {
		j.Periodic.Canonicalize()
	}

	// COMPAT: Remove in 0.7.0
	// Rewrite any job that has an update block with pre-0.6.0 syntax.
	jobHasOldUpdate := j.Update.Stagger > 0 && j.Update.MaxParallel > 0
	if jobHasOldUpdate && j.Type != JobTypeBatch {
		// Build an appropriate update block and copy it down to each task group
		base := DefaultUpdateStrategy.Copy()
		base.MaxParallel = j.Update.MaxParallel
		base.MinHealthyTime = j.Update.Stagger

		// Add to each task group, modifying as needed
		upgraded := false
		l := len(j.TaskGroups)
		for _, tg := range j.TaskGroups {
			// The task group doesn't need upgrading if it has an update block
			// with the new syntax
			u := tg.Update
			if u != nil && u.Stagger > 0 && u.MaxParallel > 0 &&
				u.HealthCheck != "" && u.MinHealthyTime > 0 && u.HealthyDeadline > 0 {
				continue
			}

			upgraded = true

			// Each group's MaxParallel should be 10% of its count, unless the
			// job has a single task group, in which case the old job-level
			// MaxParallel carries over unchanged.
			tgu := base.Copy()
			if l != 1 {
				// Round half-up to 10% of the group count
				percent := float64(tg.Count) * 0.1
				tgu.MaxParallel = int(percent + 0.5)
			}

			// Safety guards
			if tgu.MaxParallel == 0 {
				tgu.MaxParallel = 1
			} else if tgu.MaxParallel > tg.Count {
				tgu.MaxParallel = tg.Count
			}

			tg.Update = tgu
		}

		if upgraded {
			w := "A best effort conversion to new update stanza introduced in v0.6.0 was applied. " +
				"Please migrate to the new update stanza before v0.7.0."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	// Ensure that a batch job has neither a new-style nor an old-style update
	// stanza. Unfortunately we have to scan here: the deprecation spans a
	// release, so checking only the task groups would catch the new style but
	// miss the old one, and we don't want duplicate warnings.
	if j.Type == JobTypeBatch {
		displayWarning := jobHasOldUpdate
		j.Update.Stagger = 0
		j.Update.MaxParallel = 0
		j.Update.HealthCheck = ""
		j.Update.MinHealthyTime = 0
		j.Update.HealthyDeadline = 0
		j.Update.AutoRevert = false
		j.Update.Canary = 0

		// Remove any update spec from the task groups
		for _, tg := range j.TaskGroups {
			if tg.Update != nil {
				displayWarning = true
				tg.Update = nil
			}
		}

		if displayWarning {
			w := "Update stanza is disallowed for batch jobs since v0.6.0. " +
				"The update block has been automatically removed."
			multierror.Append(&mErr, fmt.Errorf(w))
		}
	}

	return mErr.ErrorOrNil()
}
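
For reference, the arithmetic the deleted shim applied per task group on multi-group jobs is round-half-up at 10% of the group count, clamped to the range [1, count]. Below is a minimal standalone sketch of that behavior; the maxParallelFor helper is hypothetical (not part of Nomad), but the rounding and guards mirror the deleted code above.

package main

import "fmt"

// maxParallelFor is a hypothetical standalone copy of the per-group
// arithmetic in the deleted shim: 10% of the group count, rounded
// half-up via int(percent + 0.5), then clamped to [1, count].
func maxParallelFor(count int) int {
	percent := float64(count) * 0.1
	mp := int(percent + 0.5)

	// Safety guards, as in the deleted code
	if mp == 0 {
		mp = 1
	} else if mp > count {
		mp = count
	}
	return mp
}

func main() {
	// Counts taken from the deleted "Multiple task group; rounding" test case:
	//  2 -> 1 (0.7 truncates to 0, clamped up to 1)
	// 14 -> 1 (1.9 truncates to 1)
	// 26 -> 3 (3.1 truncates to 3)
	for _, count := range []int{2, 14, 26} {
		fmt.Printf("count=%d -> MaxParallel=%d\n", count, maxParallelFor(count))
	}
}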

@@ -11,7 +11,6 @@ import (
"github.com/hashicorp/consul/api"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/helper/uuid"
"github.com/kr/pretty"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -157,345 +156,6 @@ func TestJob_Warnings(t *testing.T) {
	}
}

func TestJob_Canonicalize_Update(t *testing.T) {
	cases := []struct {
		Name     string
		Job      *Job
		Expected *Job
		Warnings []string
	}{
		{
			Name:     "One task group",
			Warnings: []string{"conversion to new update stanza"},
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 2,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 2,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						EphemeralDisk:    DefaultEphemeralDisk(),
						Update: &UpdateStrategy{
							Stagger:         30 * time.Second,
							MaxParallel:     2,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
							MinHealthyTime:  10 * time.Second,
							HealthyDeadline: 5 * time.Minute,
							AutoRevert:      false,
							Canary:          0,
						},
					},
				},
			},
		},
		{
			Name:     "One task group batch",
			Warnings: []string{"Update stanza is disallowed for batch jobs"},
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeBatch,
				Update: UpdateStrategy{
					MaxParallel: 2,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeBatch,
				Update:    UpdateStrategy{},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeBatch),
						ReschedulePolicy: NewReschedulePolicy(JobTypeBatch),
						EphemeralDisk:    DefaultEphemeralDisk(),
					},
				},
			},
		},
		{
			Name:     "One task group batch - new spec",
			Warnings: []string{"Update stanza is disallowed for batch jobs"},
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeBatch,
				Update: UpdateStrategy{
					Stagger:         2 * time.Second,
					MaxParallel:     2,
					Canary:          2,
					MinHealthyTime:  2 * time.Second,
					HealthyDeadline: 10 * time.Second,
					HealthCheck:     UpdateStrategyHealthCheck_Checks,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							Stagger:         2 * time.Second,
							MaxParallel:     2,
							Canary:          2,
							MinHealthyTime:  2 * time.Second,
							HealthyDeadline: 10 * time.Second,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
						},
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeBatch,
				Update:    UpdateStrategy{},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeBatch),
						ReschedulePolicy: NewReschedulePolicy(JobTypeBatch),
						EphemeralDisk:    DefaultEphemeralDisk(),
					},
				},
			},
		},
		{
			Name: "One task group service - new spec",
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					Stagger:         2 * time.Second,
					MaxParallel:     2,
					Canary:          2,
					MinHealthyTime:  2 * time.Second,
					HealthyDeadline: 10 * time.Second,
					HealthCheck:     UpdateStrategyHealthCheck_Checks,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							Stagger:         2 * time.Second,
							MaxParallel:     2,
							Canary:          2,
							MinHealthyTime:  2 * time.Second,
							HealthyDeadline: 10 * time.Second,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
						},
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					Stagger:         2 * time.Second,
					MaxParallel:     2,
					Canary:          2,
					MinHealthyTime:  2 * time.Second,
					HealthyDeadline: 10 * time.Second,
					HealthCheck:     UpdateStrategyHealthCheck_Checks,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						EphemeralDisk:    DefaultEphemeralDisk(),
						Update: &UpdateStrategy{
							Stagger:         2 * time.Second,
							MaxParallel:     2,
							Canary:          2,
							MinHealthyTime:  2 * time.Second,
							HealthyDeadline: 10 * time.Second,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
						},
					},
				},
			},
		},
		{
			Name:     "One task group; too high of parallelism",
			Warnings: []string{"conversion to new update stanza"},
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 200,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 200,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						EphemeralDisk:    DefaultEphemeralDisk(),
						Update: &UpdateStrategy{
							Stagger:         30 * time.Second,
							MaxParallel:     2,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
							MinHealthyTime:  10 * time.Second,
							HealthyDeadline: 5 * time.Minute,
							AutoRevert:      false,
							Canary:          0,
						},
					},
				},
			},
		},
		{
			Name:     "Multiple task group; rounding",
			Warnings: []string{"conversion to new update stanza"},
			Job: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 2,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
					},
					{
						Name:  "bar",
						Count: 14,
					},
					{
						Name:  "foo",
						Count: 26,
					},
				},
			},
			Expected: &Job{
				Namespace: "test",
				Type:      JobTypeService,
				Update: UpdateStrategy{
					MaxParallel: 2,
					Stagger:     10 * time.Second,
				},
				TaskGroups: []*TaskGroup{
					{
						Name:             "foo",
						Count:            2,
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						EphemeralDisk:    DefaultEphemeralDisk(),
						Update: &UpdateStrategy{
							Stagger:         30 * time.Second,
							MaxParallel:     1,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
							MinHealthyTime:  10 * time.Second,
							HealthyDeadline: 5 * time.Minute,
							AutoRevert:      false,
							Canary:          0,
						},
					},
					{
						Name:             "bar",
						Count:            14,
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						EphemeralDisk:    DefaultEphemeralDisk(),
						Update: &UpdateStrategy{
							Stagger:         30 * time.Second,
							MaxParallel:     1,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
							MinHealthyTime:  10 * time.Second,
							HealthyDeadline: 5 * time.Minute,
							AutoRevert:      false,
							Canary:          0,
						},
					},
					{
						Name:             "foo",
						Count:            26,
						EphemeralDisk:    DefaultEphemeralDisk(),
						RestartPolicy:    NewRestartPolicy(JobTypeService),
						ReschedulePolicy: NewReschedulePolicy(JobTypeService),
						Update: &UpdateStrategy{
							Stagger:         30 * time.Second,
							MaxParallel:     3,
							HealthCheck:     UpdateStrategyHealthCheck_Checks,
							MinHealthyTime:  10 * time.Second,
							HealthyDeadline: 5 * time.Minute,
							AutoRevert:      false,
							Canary:          0,
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Canonicalize()
			if !reflect.DeepEqual(c.Job, c.Expected) {
				t.Fatalf("Diff %#v", pretty.Diff(c.Job, c.Expected))
			}

			wErr := ""
			if warnings != nil {
				wErr = warnings.Error()
			}
			for _, w := range c.Warnings {
				if !strings.Contains(wErr, w) {
					t.Fatalf("Wanted warning %q: got %q", w, wErr)
				}
			}

			if len(c.Warnings) == 0 && warnings != nil {
				t.Fatalf("Wanted no warnings: got %q", wErr)
			}
		})
	}
}
func TestJob_SpecChanged(t *testing.T) {
	// Get a base test job
	base := testJob()