package rescheduling

import (
	"fmt"
	"os"
	"reflect"
	"sort"
	"time"

	e2e "github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/jobspec"
	"github.com/hashicorp/nomad/testutil"
)

// ns is the namespace passed to the e2e helpers; the empty string leaves the
// default namespace in use.
const ns = ""

type RescheduleE2ETest struct {
	framework.TC
	// jobIds tracks the jobs registered by each test case so that AfterEach
	// can stop and purge them.
	jobIds []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "Rescheduling",
		CanRunLocal: true,
		Consul:      true,
		Cases: []framework.TestCase{
			new(RescheduleE2ETest),
		},
	})
}

func (tc *RescheduleE2ETest) BeforeAll(f *framework.F) {
	e2e.WaitForLeader(f.T(), tc.Nomad())
	e2e.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}

func (tc *RescheduleE2ETest) AfterEach(f *framework.F) {
	if os.Getenv("NOMAD_TEST_SKIPCLEANUP") == "1" {
		return
	}

	for _, id := range tc.jobIds {
		_, err := e2e.Command("nomad", "job", "stop", "-purge", id)
		f.Assert().NoError(err)
	}
	tc.jobIds = []string{}
	_, err := e2e.Command("nomad", "system", "gc")
	f.Assert().NoError(err)
}
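
// The norescheduling.nomad jobspec is not reproduced in this file. A sketch of
// the group-level policy that would produce the behavior TestNoReschedule
// asserts (failed allocations are never replaced) is presumably something
// like:
//
//	reschedule {
//	  attempts  = 0
//	  unlimited = false
//	}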

// TestNoReschedule runs a job that should fail and never reschedule
func (tc *RescheduleE2ETest) TestNoReschedule(f *framework.F) {
	jobID := "test-no-reschedule-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/norescheduling.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"failed", "failed", "failed"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 failed allocs",
	)
}
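
// Nomad does not apply reschedule policies to system jobs, so the failed
// allocations in TestNoRescheduleSystem are expected to stay failed rather
// than be replaced.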

// TestNoRescheduleSystem runs a system job that should fail and never reschedule
func (tc *RescheduleE2ETest) TestNoRescheduleSystem(f *framework.F) {
	jobID := "test-reschedule-system-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_system.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				for _, status := range got {
					if status != "failed" {
						return false
					}
				}
				return true
			}, nil,
		),
		"should have only failed allocs",
	)
}
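
// TestDefaultReschedule leans on the default reschedule policy for service
// jobs, which (as of the Nomad versions this suite targets) is presumably an
// unlimited number of attempts with an exponential delay starting at 30s;
// that is why a 35s sleep is enough to see the first round of rescheduled
// allocations.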

// TestDefaultReschedule runs a job that should reschedule after delay
func (tc *RescheduleE2ETest) TestDefaultReschedule(f *framework.F) {

	jobID := "test-default-reschedule-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_default.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"failed", "failed", "failed"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 failed allocs",
	)

	// TODO(tgross): return early if "slow" isn't set
	// wait until first exponential delay kicks in and rescheduling is attempted
	time.Sleep(time.Second * 35)
	expected = []string{"failed", "failed", "failed", "failed", "failed", "failed"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 6 failed allocs after 35s",
	)
}
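
// The rescheduling_fail.nomad jobspec presumably caps reschedule attempts at a
// small number, so the job settles at three failed allocations.
// Re-registering the job with a command that succeeds ("sleep 15000") creates
// a new job version whose placements are not blocked by the exhausted
// reschedule limit, which is what TestRescheduleMaxAttempts verifies.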

// TestRescheduleMaxAttempts runs a job with a limited number of reschedule attempts
func (tc *RescheduleE2ETest) TestRescheduleMaxAttempts(f *framework.F) {

	jobID := "test-reschedule-fail-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_fail.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"failed", "failed", "failed"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 failed allocs",
	)

	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_fail.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "sleep 15000"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				for _, status := range got {
					if status == "running" {
						return true
					}
				}
				return false
			}, nil,
		),
		"should have at least 1 running alloc",
	)
}

// TestRescheduleSuccess runs a job that should be running after rescheduling
func (tc *RescheduleE2ETest) TestRescheduleSuccess(f *framework.F) {

	jobID := "test-reschedule-success-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_success.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				for _, status := range got {
					if status == "running" {
						return true
					}
				}
				return false
			}, nil,
		),
		"should have at least 1 running alloc",
	)
}
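
// e2e.AllocStatusesRescheduled presumably returns only the allocations that
// have been rescheduled, so the update and canary tests below treat a
// non-empty result as the signal that rescheduling actually happened after a
// job update.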

// TestRescheduleWithUpdate updates a running job to fail, and verifies that
// it gets rescheduled
func (tc *RescheduleE2ETest) TestRescheduleWithUpdate(f *framework.F) {

	jobID := "test-reschedule-update-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_update.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running allocs",
	)

	// update the job so that the task fails
	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_update.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatusesRescheduled(jobID, ns) },
			func(got []string) bool { return len(got) > 0 }, nil,
		),
		"should have rescheduled allocs until progress deadline",
	)
}
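
// The rescheduling_canary.nomad jobspec presumably deploys canaries via its
// update stanza. When the updated (broken) canary fails it is rescheduled
// while the original allocations keep running, so the deployment is expected
// to remain "running" rather than fail outright.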

// TestRescheduleWithCanary updates a running job to fail, and verifies that
// the canary gets rescheduled
func (tc *RescheduleE2ETest) TestRescheduleWithCanary(f *framework.F) {

	jobID := "test-reschedule-canary-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_canary.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")

	// update the job so that the task fails
	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_canary.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatusesRescheduled(jobID, ns) },
			func(got []string) bool { return len(got) > 0 }, nil,
		),
		"should have rescheduled allocs until progress deadline",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "running", nil),
		"deployment should be running")
}
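
// The rescheduling_canary_autorevert.nomad jobspec presumably sets auto_revert
// on its update stanza. Once the updated canaries fail, the deployment is
// reverted to the last stable version, which is why the test below ends up
// with three failed canaries alongside three running (reverted) allocations
// and a successful deployment.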

// TestRescheduleWithCanaryAutoRevert updates a running job to fail, and
// verifies that the job gets reverted.
func (tc *RescheduleE2ETest) TestRescheduleWithCanaryAutoRevert(f *framework.F) {

	jobID := "test-reschedule-canary-revert-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_canary_autorevert.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")

	// update the job so that the task fails
	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_canary_autorevert.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatusesRescheduled(jobID, ns) },
			func(got []string) bool { return len(got) > 0 }, nil,
		),
		"should have new allocs after update",
	)

	// then we'll fail and revert
	expected = []string{"failed", "failed", "failed", "running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running reverted allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")
}
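
// The rescheduling_maxp.nomad jobspec presumably sets max_parallel = 1 and
// does not auto-revert. That explains the expected mix below: one original
// allocation has been stopped ("complete"), its broken replacement has failed
// and been rescheduled once (two "failed"), the other two originals keep
// "running", and the deployment is still "running" when the test checks it.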

// TestRescheduleMaxParallel updates a job with a max_parallel config
func (tc *RescheduleE2ETest) TestRescheduleMaxParallel(f *framework.F) {

	jobID := "test-reschedule-maxp-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_maxp.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")

	// update the job so that the task fails
	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_maxp.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	expected = []string{"complete", "failed", "failed", "running", "running"}

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				sort.Strings(got)
				return reflect.DeepEqual(got, expected)
			}, nil,
		),
		"should have failed allocs including rescheduled failed allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "running", nil),
		"deployment should be running")
}
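
// Besides the allocation statuses, TestRescheduleMaxParallelAutoRevert also
// counts deployments at the end: the initial deployment and the auto-revert
// deployment should both be "successful", while the broken update should be
// the single "failed" deployment.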

// TestRescheduleMaxParallelAutoRevert updates a job with a max_parallel
// config that will autorevert on failure
func (tc *RescheduleE2ETest) TestRescheduleMaxParallelAutoRevert(f *framework.F) {

	jobID := "test-reschedule-maxp-revert-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_maxp_autorevert.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have exactly 3 running allocs",
	)

	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")

	// update the job so that the task fails
	job, err := jobspec.ParseFile("rescheduling/input/rescheduling_maxp_autorevert.nomad")
	f.NoError(err)
	job.ID = &jobID
	job.TaskGroups[0].Tasks[0].Config["args"] = []string{"-c", "lol"}
	_, _, err = tc.Nomad().Jobs().Register(job, nil)
	f.NoError(err, "could not register updated job")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatusesRescheduled(jobID, ns) },
			func(got []string) bool { return len(got) > 0 }, nil,
		),
		"should have new allocs after update",
	)

	// wait for the revert
	expected = []string{"complete", "failed", "running", "running", "running"}
	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				sort.Strings(got)
				return reflect.DeepEqual(got, expected)
			}, nil,
		),
		"should have one complete, one failed, and 3 reverted running allocs",
	)

	// at this point the allocs have been checked but we need to wait for the
	// deployment to be marked complete before we can assert that it's successful
	// and verify the count of deployments
	f.NoError(
		e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"most recent deployment should be successful")

	out, err := e2e.Command("nomad", "deployment", "status")
	f.NoError(err, "could not get deployment status")

	results, err := e2e.ParseColumns(out)
	f.NoError(err, "could not parse deployment status")
	statuses := map[string]int{}
	for _, row := range results {
		if row["Job ID"] == jobID {
			statuses[row["Status"]]++
		}
	}

	f.Equal(1, statuses["failed"],
		fmt.Sprintf("expected only 1 failed deployment, got:\n%s", out))
	f.Equal(2, statuses["successful"],
		fmt.Sprintf("expected 2 successful deployments, got:\n%s", out))
}

// TestRescheduleProgressDeadline verifies that the progress deadline is reset
// with each healthy allocation.
func (tc *RescheduleE2ETest) TestRescheduleProgressDeadline(f *framework.F) {

	jobID := "test-reschedule-deadline-" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_progressdeadline.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	expected := []string{"running"}
	f.NoError(
		e2e.WaitForAllocStatusExpected(jobID, ns, expected),
		"should have a running allocation",
	)

	deploymentID, err := e2e.LastDeploymentID(jobID, ns)
	f.NoError(err, "couldn't look up deployment")

	oldDeadline, err := getProgressDeadline(deploymentID)
	f.NoError(err, "could not get progress deadline")
	time.Sleep(time.Second * 20)

	newDeadline, err := getProgressDeadline(deploymentID)
	f.NoError(err, "could not get new progress deadline")
	f.NotEqual(oldDeadline, newDeadline, "progress deadline should have been updated")

	f.NoError(e2e.WaitForLastDeploymentStatus(jobID, ns, "successful", nil),
		"deployment should be successful")
}

// TestRescheduleProgressDeadlineFail verifies that a rescheduled allocation
// does not reset the progress deadline, so a deployment that only produces
// failed allocations fails once the deadline expires.
func (tc *RescheduleE2ETest) TestRescheduleProgressDeadlineFail(f *framework.F) {

	jobID := "test-reschedule-deadline-fail" + uuid.Generate()[0:8]
	f.NoError(e2e.Register(jobID, "rescheduling/input/rescheduling_progressdeadline_fail.nomad"))
	tc.jobIds = append(tc.jobIds, jobID)

	testutil.WaitForResult(func() (bool, error) {
		_, err := e2e.LastDeploymentID(jobID, ns)
		return err == nil, err
	}, func(err error) {
		f.NoError(err, "deployment wasn't created yet")
	})

	deploymentID, err := e2e.LastDeploymentID(jobID, ns)
	f.NoError(err, "couldn't look up deployment")

	oldDeadline, err := getProgressDeadline(deploymentID)
	f.NoError(err, "could not get progress deadline")
	time.Sleep(time.Second * 20)

	f.NoError(e2e.WaitForLastDeploymentStatus(jobID, ns, "failed", nil),
		"deployment should be failed")

	f.NoError(
		e2e.WaitForAllocStatusComparison(
			func() ([]string, error) { return e2e.AllocStatuses(jobID, ns) },
			func(got []string) bool {
				for _, status := range got {
					if status != "failed" {
						return false
					}
				}
				return true
			}, nil,
		),
		"should have only failed allocs",
	)

	newDeadline, err := getProgressDeadline(deploymentID)
	f.NoError(err, "could not get new progress deadline")
	f.Equal(oldDeadline, newDeadline, "progress deadline should not have been updated")
}
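
// getProgressDeadline returns the progress deadline of a deployment by
// shelling out to `nomad deployment status` and parsing the "Deployed" table,
// reading the "Progress Deadline" column of the first row. For a hypothetical
// job with a single "web" task group, that table looks roughly like the
// following (exact columns may vary by Nomad version):
//
//	Deployed
//	Task Group  Desired  Placed  Healthy  Unhealthy  Progress Deadline
//	web         1        1       1        0          2020-09-29T15:12:40Z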
func getProgressDeadline(deploymentID string) (time.Time, error) {

	out, err := e2e.Command("nomad", "deployment", "status", deploymentID)
	if err != nil {
		return time.Time{}, fmt.Errorf("could not get deployment status: %v\n%v", err, out)
	}

	section, err := e2e.GetSection(out, "Deployed")
	if err != nil {
		return time.Time{}, fmt.Errorf("could not find Deployed section: %w", err)
	}

	rows, err := e2e.ParseColumns(section)
	if err != nil {
		return time.Time{}, fmt.Errorf("could not parse Deployed section: %w", err)
	}

	layout := "2006-01-02T15:04:05Z07:00" // taken from command/helpers.go
	raw := rows[0]["Progress Deadline"]
	return time.Parse(layout, raw)
}