package nomad

import (
	"fmt"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/testutil"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
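
// TestCoreScheduler_EvalGC asserts that a failed eval and its terminal
// allocations are garbage collected once the EvalGCThreshold has elapsed.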
func TestCoreScheduler_EvalGC(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" eval
	store := s1.fsm.State()
	eval := mock.Eval()
	eval.Status = structs.EvalStatusFailed
	store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
	err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	require.Nil(t, err)

	// Insert mock job with rescheduling disabled
	job := mock.Job()
	job.ID = eval.JobID
	job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
		Attempts: 0,
		Interval: 0 * time.Second,
	}
	err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job)
	require.Nil(t, err)

	// Insert "dead" alloc
	alloc := mock.Alloc()
	alloc.EvalID = eval.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	alloc.JobID = eval.JobID
	alloc.TaskGroup = job.TaskGroups[0].Name

	// Insert "lost" alloc
	alloc2 := mock.Alloc()
	alloc2.EvalID = eval.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusRun
	alloc2.ClientStatus = structs.AllocClientStatusLost
	alloc2.JobID = eval.JobID
	alloc2.TaskGroup = job.TaskGroups[0].Name
	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should be gone
	ws := memdb.NewWatchSet()
	out, err := store.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}

	outA, err := store.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA != nil {
		t.Fatalf("bad: %v", outA)
	}

	outA2, err := store.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA2 != nil {
		t.Fatalf("bad: %v", outA2)
	}
}

// Tests GC behavior on allocations being rescheduled
func TestCoreScheduler_EvalGC_ReschedulingAllocs(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" eval
	store := s1.fsm.State()
	eval := mock.Eval()
	eval.Status = structs.EvalStatusFailed
	store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
	err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	require.Nil(t, err)

	// Insert "pending" eval for same job
	eval2 := mock.Eval()
	eval2.JobID = eval.JobID
	store.UpsertJobSummary(999, mock.JobSummary(eval2.JobID))
	err = store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2})
	require.Nil(t, err)

	// Insert mock job with default reschedule policy of 2 in 10 minutes
	job := mock.Job()
	job.ID = eval.JobID

	err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job)
	require.Nil(t, err)

	// Insert failed alloc with an old reschedule attempt, can be GCed
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.EvalID = eval.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	alloc.ClientStatus = structs.AllocClientStatusFailed
	alloc.JobID = eval.JobID
	alloc.TaskGroup = job.TaskGroups[0].Name
	alloc.NextAllocation = uuid.Generate()
	alloc.RescheduleTracker = &structs.RescheduleTracker{
		Events: []*structs.RescheduleEvent{
			{
				RescheduleTime: time.Now().Add(-1 * time.Hour).UTC().UnixNano(),
				PrevNodeID:     uuid.Generate(),
				PrevAllocID:    uuid.Generate(),
			},
		},
	}

	alloc2 := mock.Alloc()
	alloc2.Job = job
	alloc2.EvalID = eval.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusRun
	alloc2.ClientStatus = structs.AllocClientStatusFailed
	alloc2.JobID = eval.JobID
	alloc2.TaskGroup = job.TaskGroups[0].Name
	alloc2.RescheduleTracker = &structs.RescheduleTracker{
		Events: []*structs.RescheduleEvent{
			{
				RescheduleTime: time.Now().Add(-3 * time.Minute).UTC().UnixNano(),
				PrevNodeID:     uuid.Generate(),
				PrevAllocID:    uuid.Generate(),
			},
		},
	}
	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc, alloc2})
	require.Nil(t, err)

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC, job has all terminal allocs and one pending eval
	gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
	err = core.Process(gc)
	require.Nil(t, err)

	// Eval should still exist
	ws := memdb.NewWatchSet()
	out, err := store.EvalByID(ws, eval.ID)
	require.Nil(t, err)
	require.NotNil(t, out)
	require.Equal(t, eval.ID, out.ID)

	outA, err := store.AllocByID(ws, alloc.ID)
	require.Nil(t, err)
	require.Nil(t, outA)

	outA2, err := store.AllocByID(ws, alloc2.ID)
	require.Nil(t, err)
	require.Equal(t, alloc2.ID, outA2.ID)
}

// Tests GC behavior on stopped job with reschedulable allocs
func TestCoreScheduler_EvalGC_StoppedJob_Reschedulable(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" eval
	store := s1.fsm.State()
	eval := mock.Eval()
	eval.Status = structs.EvalStatusFailed
	store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
	err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	require.Nil(t, err)

	// Insert mock stopped job with default reschedule policy of 2 in 10 minutes
	job := mock.Job()
	job.ID = eval.JobID
	job.Stop = true

	err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job)
	require.Nil(t, err)

	// Insert failed alloc with a recent reschedule attempt
	alloc := mock.Alloc()
	alloc.EvalID = eval.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	alloc.ClientStatus = structs.AllocClientStatusLost
	alloc.JobID = eval.JobID
	alloc.TaskGroup = job.TaskGroups[0].Name
	alloc.RescheduleTracker = &structs.RescheduleTracker{
		Events: []*structs.RescheduleEvent{
			{
				RescheduleTime: time.Now().Add(-3 * time.Minute).UTC().UnixNano(),
				PrevNodeID:     uuid.Generate(),
				PrevAllocID:    uuid.Generate(),
			},
		},
	}
	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1001, []*structs.Allocation{alloc})
	require.Nil(t, err)

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
	err = core.Process(gc)
	require.Nil(t, err)

	// Eval should not exist
	ws := memdb.NewWatchSet()
	out, err := store.EvalByID(ws, eval.ID)
	require.Nil(t, err)
	require.Nil(t, out)

	// Alloc should not exist
	outA, err := store.AllocByID(ws, alloc.ID)
	require.Nil(t, err)
	require.Nil(t, outA)
}

// An EvalGC should never reap a batch job that has not been stopped
func TestCoreScheduler_EvalGC_Batch(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		// Set EvalGCThreshold past BatchEvalGCThreshold to make sure that only
		// BatchEvalGCThreshold affects the results.
		c.BatchEvalGCThreshold = time.Hour
		c.EvalGCThreshold = 2 * time.Hour
	})
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 2, 10)

	var jobModifyIdx uint64 = 1000

	// A "stopped" job containing one "complete" eval with one terminal allocation.
	store := s1.fsm.State()
	stoppedJob := mock.Job()
	stoppedJob.Type = structs.JobTypeBatch
	stoppedJob.Status = structs.JobStatusDead
	stoppedJob.Stop = true
	stoppedJob.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
		Attempts: 0,
		Interval: 0 * time.Second,
	}
	err := store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx+1, stoppedJob)
	must.NoError(t, err)

	stoppedJobEval := mock.Eval()
	stoppedJobEval.Status = structs.EvalStatusComplete
	stoppedJobEval.Type = structs.JobTypeBatch
	stoppedJobEval.JobID = stoppedJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Evaluation{stoppedJobEval})
	must.NoError(t, err)

	stoppedJobStoppedAlloc := mock.Alloc()
	stoppedJobStoppedAlloc.Job = stoppedJob
	stoppedJobStoppedAlloc.JobID = stoppedJob.ID
	stoppedJobStoppedAlloc.EvalID = stoppedJobEval.ID
	stoppedJobStoppedAlloc.DesiredStatus = structs.AllocDesiredStatusStop
	stoppedJobStoppedAlloc.ClientStatus = structs.AllocClientStatusFailed

	stoppedJobLostAlloc := mock.Alloc()
	stoppedJobLostAlloc.Job = stoppedJob
	stoppedJobLostAlloc.JobID = stoppedJob.ID
	stoppedJobLostAlloc.EvalID = stoppedJobEval.ID
	stoppedJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun
	stoppedJobLostAlloc.ClientStatus = structs.AllocClientStatusLost

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+3, []*structs.Allocation{stoppedJobStoppedAlloc, stoppedJobLostAlloc})
	must.NoError(t, err)

	// A "dead" job containing one "complete" eval with:
	// 1. A "stopped" alloc
	// 2. A "lost" alloc
	// Both allocs upserted at 1002.
	deadJob := mock.Job()
	deadJob.Type = structs.JobTypeBatch
	deadJob.Status = structs.JobStatusDead
	err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, deadJob)
	must.NoError(t, err)

	deadJobEval := mock.Eval()
	deadJobEval.Status = structs.EvalStatusComplete
	deadJobEval.Type = structs.JobTypeBatch
	deadJobEval.JobID = deadJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{deadJobEval})
	must.NoError(t, err)

	stoppedAlloc := mock.Alloc()
	stoppedAlloc.Job = deadJob
	stoppedAlloc.JobID = deadJob.ID
	stoppedAlloc.EvalID = deadJobEval.ID
	stoppedAlloc.DesiredStatus = structs.AllocDesiredStatusStop
	stoppedAlloc.ClientStatus = structs.AllocClientStatusFailed

	lostAlloc := mock.Alloc()
	lostAlloc.Job = deadJob
	lostAlloc.JobID = deadJob.ID
	lostAlloc.EvalID = deadJobEval.ID
	lostAlloc.DesiredStatus = structs.AllocDesiredStatusRun
	lostAlloc.ClientStatus = structs.AllocClientStatusLost

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx+2, []*structs.Allocation{stoppedAlloc, lostAlloc})
	must.NoError(t, err)

	// An "alive" job #2 containing two complete evals. The first with:
	// 1. A "lost" alloc
	// 2. A "running" alloc
	// Both allocs upserted at 999
	//
	// The second with just terminal allocs:
	// 1. A "completed" alloc
	// All allocs upserted at 999. The eval upserted at 999 as well.
	activeJob := mock.Job()
	activeJob.Type = structs.JobTypeBatch
	activeJob.Status = structs.JobStatusDead
	err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, activeJob)
	must.NoError(t, err)

	activeJobEval := mock.Eval()
	activeJobEval.Status = structs.EvalStatusComplete
	activeJobEval.Type = structs.JobTypeBatch
	activeJobEval.JobID = activeJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{activeJobEval})
	must.NoError(t, err)

	activeJobRunningAlloc := mock.Alloc()
	activeJobRunningAlloc.Job = activeJob
	activeJobRunningAlloc.JobID = activeJob.ID
	activeJobRunningAlloc.EvalID = activeJobEval.ID
	activeJobRunningAlloc.DesiredStatus = structs.AllocDesiredStatusRun
	activeJobRunningAlloc.ClientStatus = structs.AllocClientStatusRunning

	activeJobLostAlloc := mock.Alloc()
	activeJobLostAlloc.Job = activeJob
	activeJobLostAlloc.JobID = activeJob.ID
	activeJobLostAlloc.EvalID = activeJobEval.ID
	activeJobLostAlloc.DesiredStatus = structs.AllocDesiredStatusRun
	activeJobLostAlloc.ClientStatus = structs.AllocClientStatusLost

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobRunningAlloc, activeJobLostAlloc})
	must.NoError(t, err)

	activeJobCompleteEval := mock.Eval()
	activeJobCompleteEval.Status = structs.EvalStatusComplete
	activeJobCompleteEval.Type = structs.JobTypeBatch
	activeJobCompleteEval.JobID = activeJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{activeJobCompleteEval})
	must.NoError(t, err)

	activeJobCompletedEvalCompletedAlloc := mock.Alloc()
	activeJobCompletedEvalCompletedAlloc.Job = activeJob
	activeJobCompletedEvalCompletedAlloc.JobID = activeJob.ID
	activeJobCompletedEvalCompletedAlloc.EvalID = activeJobCompleteEval.ID
	activeJobCompletedEvalCompletedAlloc.DesiredStatus = structs.AllocDesiredStatusStop
	activeJobCompletedEvalCompletedAlloc.ClientStatus = structs.AllocClientStatusComplete

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{activeJobCompletedEvalCompletedAlloc})
	must.NoError(t, err)

	// A job that ran once and was then purged.
	purgedJob := mock.Job()
	purgedJob.Type = structs.JobTypeBatch
	purgedJob.Status = structs.JobStatusDead
	err = store.UpsertJob(structs.MsgTypeTestSetup, jobModifyIdx, purgedJob)
	must.NoError(t, err)

	purgedJobEval := mock.Eval()
	purgedJobEval.Status = structs.EvalStatusComplete
	purgedJobEval.Type = structs.JobTypeBatch
	purgedJobEval.JobID = purgedJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx+1, []*structs.Evaluation{purgedJobEval})
	must.NoError(t, err)

	purgedJobCompleteAlloc := mock.Alloc()
	purgedJobCompleteAlloc.Job = purgedJob
	purgedJobCompleteAlloc.JobID = purgedJob.ID
	purgedJobCompleteAlloc.EvalID = purgedJobEval.ID
	purgedJobCompleteAlloc.DesiredStatus = structs.AllocDesiredStatusRun
	purgedJobCompleteAlloc.ClientStatus = structs.AllocClientStatusLost

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Allocation{purgedJobCompleteAlloc})
	must.NoError(t, err)

	purgedJobCompleteEval := mock.Eval()
	purgedJobCompleteEval.Status = structs.EvalStatusComplete
	purgedJobCompleteEval.Type = structs.JobTypeBatch
	purgedJobCompleteEval.JobID = purgedJob.ID
	err = store.UpsertEvals(structs.MsgTypeTestSetup, jobModifyIdx-1, []*structs.Evaluation{purgedJobCompleteEval})
	must.NoError(t, err)

	// Purge job.
	err = store.DeleteJob(jobModifyIdx, purgedJob.Namespace, purgedJob.ID)
	must.NoError(t, err)

	// A little helper for assertions
	assertCorrectJobEvalAlloc := func(
		ws memdb.WatchSet,
		jobsShouldExist []*structs.Job,
		jobsShouldNotExist []*structs.Job,
		evalsShouldExist []*structs.Evaluation,
		evalsShouldNotExist []*structs.Evaluation,
		allocsShouldExist []*structs.Allocation,
		allocsShouldNotExist []*structs.Allocation,
	) {
		t.Helper()
		for _, job := range jobsShouldExist {
			out, err := store.JobByID(ws, job.Namespace, job.ID)
			must.NoError(t, err)
			must.NotNil(t, out)
		}

		for _, job := range jobsShouldNotExist {
			out, err := store.JobByID(ws, job.Namespace, job.ID)
			must.NoError(t, err)
			must.Nil(t, out)
		}

		for _, eval := range evalsShouldExist {
			out, err := store.EvalByID(ws, eval.ID)
			must.NoError(t, err)
			must.NotNil(t, out)
		}

		for _, eval := range evalsShouldNotExist {
			out, err := store.EvalByID(ws, eval.ID)
			must.NoError(t, err)
			must.Nil(t, out)
		}

		for _, alloc := range allocsShouldExist {
			outA, err := store.AllocByID(ws, alloc.ID)
			must.NoError(t, err)
			must.NotNil(t, outA)
		}

		for _, alloc := range allocsShouldNotExist {
			outA, err := store.AllocByID(ws, alloc.ID)
			must.NoError(t, err)
			must.Nil(t, outA)
		}
	}

	// Create a core scheduler
	snap, err := store.Snapshot()
	must.NoError(t, err)
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC without moving the time at all
	gc := s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx)
	err = core.Process(gc)
	must.NoError(t, err)

	// Nothing is gone
	assertCorrectJobEvalAlloc(
		memdb.NewWatchSet(),
		[]*structs.Job{deadJob, activeJob, stoppedJob},
		[]*structs.Job{},
		[]*structs.Evaluation{
			deadJobEval,
			activeJobEval, activeJobCompleteEval,
			stoppedJobEval,
			purgedJobEval,
		},
		[]*structs.Evaluation{},
		[]*structs.Allocation{
			stoppedAlloc, lostAlloc,
			activeJobRunningAlloc, activeJobLostAlloc, activeJobCompletedEvalCompletedAlloc,
			stoppedJobStoppedAlloc, stoppedJobLostAlloc,
			purgedJobCompleteAlloc,
		},
		[]*structs.Allocation{},
	)

	// Update the time tables by half of the BatchEvalGCThreshold which is too
	// small to GC anything.
	tt := s1.fsm.TimeTable()
	tt.Witness(2*jobModifyIdx, time.Now().UTC().Add((-1)*s1.config.BatchEvalGCThreshold/2))

	gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2)
	err = core.Process(gc)
	must.NoError(t, err)

	// Nothing is gone.
	assertCorrectJobEvalAlloc(
		memdb.NewWatchSet(),
		[]*structs.Job{deadJob, activeJob, stoppedJob},
		[]*structs.Job{},
		[]*structs.Evaluation{
			deadJobEval,
			activeJobEval, activeJobCompleteEval,
			stoppedJobEval,
			purgedJobEval,
		},
		[]*structs.Evaluation{},
		[]*structs.Allocation{
			stoppedAlloc, lostAlloc,
			activeJobRunningAlloc, activeJobLostAlloc, activeJobCompletedEvalCompletedAlloc,
			stoppedJobStoppedAlloc, stoppedJobLostAlloc,
			purgedJobCompleteAlloc,
		},
		[]*structs.Allocation{},
	)

	// Update the time tables so that BatchEvalGCThreshold has elapsed.
	s1.fsm.timetable.table = make([]TimeTableEntry, 2, 10)
	tt = s1.fsm.TimeTable()
	tt.Witness(2*jobModifyIdx, time.Now().UTC().Add(-1*s1.config.BatchEvalGCThreshold))

	gc = s1.coreJobEval(structs.CoreJobEvalGC, jobModifyIdx*2)
	err = core.Process(gc)
	must.NoError(t, err)

	// We expect the following:
	//
	// 1. The stopped job remains, but its evaluation and allocations are both removed.
	// 2. The dead job remains with its evaluation and allocations intact. This is because
	//    for them the BatchEvalGCThreshold has not yet elapsed (their modification indexes
	//    are larger than that of the job).
	// 3. The active job remains since it is active, even though the allocations are otherwise
	//    eligible for GC. However, the inactive allocation is GCed for it.
	// 4. The eval and allocation for the purged job are GCed.
	assertCorrectJobEvalAlloc(
		memdb.NewWatchSet(),
		[]*structs.Job{deadJob, activeJob, stoppedJob},
		[]*structs.Job{},
		[]*structs.Evaluation{deadJobEval, activeJobEval},
		[]*structs.Evaluation{activeJobCompleteEval, stoppedJobEval, purgedJobEval},
		[]*structs.Allocation{stoppedAlloc, lostAlloc, activeJobRunningAlloc},
		[]*structs.Allocation{
			activeJobLostAlloc, activeJobCompletedEvalCompletedAlloc,
			stoppedJobStoppedAlloc, stoppedJobLostAlloc,
			purgedJobCompleteAlloc,
		})
}
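
// TestCoreScheduler_EvalGC_Partial asserts that when an eval still has a
// non-terminal allocation, the eval and the running allocation are kept
// while the terminal allocations are garbage collected.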
func TestCoreScheduler_EvalGC_Partial(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" eval
	store := s1.fsm.State()
	eval := mock.Eval()
	eval.Status = structs.EvalStatusComplete
	store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
	err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create mock job with id same as eval
	job := mock.Job()
	job.ID = eval.JobID

	// Insert "dead" alloc
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.EvalID = eval.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	alloc.TaskGroup = job.TaskGroups[0].Name
	store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))

	// Insert "lost" alloc
	alloc2 := mock.Alloc()
	alloc2.JobID = job.ID
	alloc2.EvalID = eval.ID
	alloc2.TaskGroup = job.TaskGroups[0].Name
	alloc2.DesiredStatus = structs.AllocDesiredStatusRun
	alloc2.ClientStatus = structs.AllocClientStatusLost

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert "running" alloc
	alloc3 := mock.Alloc()
	alloc3.EvalID = eval.ID
	alloc3.JobID = job.ID
	store.UpsertJobSummary(1003, mock.JobSummary(alloc3.JobID))
	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1004, []*structs.Allocation{alloc3})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert mock job with rescheduling disabled
	job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
		Attempts: 0,
		Interval: 0 * time.Second,
	}
	err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job)
	require.Nil(t, err)

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should not be gone
	ws := memdb.NewWatchSet()
	out, err := store.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}

	outA, err := store.AllocByID(ws, alloc3.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA == nil {
		t.Fatalf("bad: %v", outA)
	}

	// Should be gone
	outB, err := store.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outB != nil {
		t.Fatalf("bad: %v", outB)
	}

	outC, err := store.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outC != nil {
		t.Fatalf("bad: %v", outC)
	}
}
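
// TestCoreScheduler_EvalGC_Force asserts that a forced GC (CoreJobForceGC)
// reaps a terminal eval and its allocation without advancing the time table,
// with and without ACLs enabled.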
func TestCoreScheduler_EvalGC_Force(t *testing.T) {
	ci.Parallel(t)
	for _, withAcl := range []bool{false, true} {
		t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
			var server *Server
			var cleanup func()
			if withAcl {
				server, _, cleanup = TestACLServer(t, nil)
			} else {
				server, cleanup = TestServer(t, nil)
			}
			defer cleanup()
			testutil.WaitForLeader(t, server.RPC)

			// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
			server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

			// Insert "dead" eval
			store := server.fsm.State()
			eval := mock.Eval()
			eval.Status = structs.EvalStatusFailed
			store.UpsertJobSummary(999, mock.JobSummary(eval.JobID))
			err := store.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})
			if err != nil {
				t.Fatalf("err: %v", err)
			}

			// Insert mock job with rescheduling disabled
			job := mock.Job()
			job.ID = eval.JobID
			job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
				Attempts: 0,
				Interval: 0 * time.Second,
			}
			err = store.UpsertJob(structs.MsgTypeTestSetup, 1001, job)
			require.Nil(t, err)

			// Insert "dead" alloc
			alloc := mock.Alloc()
			alloc.EvalID = eval.ID
			alloc.DesiredStatus = structs.AllocDesiredStatusStop
			alloc.TaskGroup = job.TaskGroups[0].Name
			store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
			err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc})
			if err != nil {
				t.Fatalf("err: %v", err)
			}

			// Create a core scheduler
			snap, err := store.Snapshot()
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			core := NewCoreScheduler(server, snap)

			// Attempt the GC
			gc := server.coreJobEval(structs.CoreJobForceGC, 1002)
			err = core.Process(gc)
			if err != nil {
				t.Fatalf("err: %v", err)
			}

			// Should be gone
			ws := memdb.NewWatchSet()
			out, err := store.EvalByID(ws, eval.ID)
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			if out != nil {
				t.Fatalf("bad: %v", out)
			}

			outA, err := store.AllocByID(ws, alloc.ID)
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			if outA != nil {
				t.Fatalf("bad: %v", outA)
			}
		})
	}
}
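
// TestCoreScheduler_NodeGC asserts that a down node with no allocations is
// garbage collected once the NodeGCThreshold has elapsed, with and without
// ACLs enabled.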
func TestCoreScheduler_NodeGC(t *testing.T) {
	ci.Parallel(t)
	for _, withAcl := range []bool{false, true} {
		t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
			var server *Server
			var cleanup func()
			if withAcl {
				server, _, cleanup = TestACLServer(t, nil)
			} else {
				server, cleanup = TestServer(t, nil)
			}
			defer cleanup()
			testutil.WaitForLeader(t, server.RPC)

			// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
			server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

			// Insert "dead" node
			store := server.fsm.State()
			node := mock.Node()
			node.Status = structs.NodeStatusDown
			err := store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
			if err != nil {
				t.Fatalf("err: %v", err)
			}

			// Update the time tables to make this work
			tt := server.fsm.TimeTable()
			tt.Witness(2000, time.Now().UTC().Add(-1*server.config.NodeGCThreshold))

			// Create a core scheduler
			snap, err := store.Snapshot()
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			core := NewCoreScheduler(server, snap)

			// Attempt the GC
			gc := server.coreJobEval(structs.CoreJobNodeGC, 2000)
			err = core.Process(gc)
			if err != nil {
				t.Fatalf("err: %v", err)
			}

			// Should be gone
			ws := memdb.NewWatchSet()
			out, err := store.NodeByID(ws, node.ID)
			if err != nil {
				t.Fatalf("err: %v", err)
			}
			if out != nil {
				t.Fatalf("bad: %v", out)
			}
		})
	}
}
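
// TestCoreScheduler_NodeGC_TerminalAllocs asserts that a down node is still
// garbage collected when the only allocation in the state store is terminal.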
func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" node
	store := s1.fsm.State()
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	err := store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert a terminal alloc on that node
	alloc := mock.Alloc()
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
	if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should be gone
	ws := memdb.NewWatchSet()
	out, err := store.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}
}
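
// TestCoreScheduler_NodeGC_RunningAllocs asserts that a down node is not
// garbage collected while a running allocation is still placed on it.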
func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" node
	store := s1.fsm.State()
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	err := store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert a running alloc on that node
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	alloc.ClientStatus = structs.AllocClientStatusRunning
	store.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID))
	if err := store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should still be here
	ws := memdb.NewWatchSet()
	out, err := store.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}
}
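
// TestCoreScheduler_NodeGC_Force asserts that a forced GC reaps a down node
// without waiting for the NodeGCThreshold.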
func TestCoreScheduler_NodeGC_Force(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert "dead" node
	store := s1.fsm.State()
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	err := store.UpsertNode(structs.MsgTypeTestSetup, 1000, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobForceGC, 1000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should be gone
	ws := memdb.NewWatchSet()
	out, err := store.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}
}
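
// TestCoreScheduler_JobGC_OutstandingEvals asserts that a dead job is only
// garbage collected once all of its evaluations are terminal.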
func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert job.
	store := s1.fsm.State()
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.Status = structs.JobStatusDead
	err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert two evals, one terminal and one not
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete

	eval2 := mock.Eval()
	eval2.JobID = job.ID
	eval2.Status = structs.EvalStatusPending
	err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should still exist
	ws := memdb.NewWatchSet()
	out, err := store.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}

	outE, err := store.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outE == nil {
		t.Fatalf("bad: %v", outE)
	}

	outE2, err := store.EvalByID(ws, eval2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outE2 == nil {
		t.Fatalf("bad: %v", outE2)
	}

	// Update the second eval to be terminal
	eval2.Status = structs.EvalStatusComplete
	err = store.UpsertEvals(structs.MsgTypeTestSetup, 1003, []*structs.Evaluation{eval2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err = store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core = NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc = s1.coreJobEval(structs.CoreJobJobGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should not still exist
	out, err = store.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}

	outE, err = store.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outE != nil {
		t.Fatalf("bad: %v", outE)
	}

	outE2, err = store.EvalByID(ws, eval2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outE2 != nil {
		t.Fatalf("bad: %v", outE2)
	}
}
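
// TestCoreScheduler_JobGC_OutstandingAllocs asserts that a dead job is only
// garbage collected once all of its allocations are terminal.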
func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
	ci.Parallel(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert job.
	store := s1.fsm.State()
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.Status = structs.JobStatusDead
	job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
		Attempts: 0,
		Interval: 0 * time.Second,
	}
	err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert an eval
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete
	err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert two allocs, one terminal and one not
	alloc := mock.Alloc()
	alloc.JobID = job.ID
	alloc.EvalID = eval.ID
	alloc.DesiredStatus = structs.AllocDesiredStatusRun
	alloc.ClientStatus = structs.AllocClientStatusComplete
	alloc.TaskGroup = job.TaskGroups[0].Name

	alloc2 := mock.Alloc()
	alloc2.JobID = job.ID
	alloc2.EvalID = eval.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusRun
	alloc2.ClientStatus = structs.AllocClientStatusRunning
	alloc2.TaskGroup = job.TaskGroups[0].Name

	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))

	// Create a core scheduler
	snap, err := store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should still exist
	ws := memdb.NewWatchSet()
	out, err := store.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}

	outA, err := store.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA == nil {
		t.Fatalf("bad: %v", outA)
	}

	outA2, err := store.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA2 == nil {
		t.Fatalf("bad: %v", outA2)
	}

	// Update the second alloc to be terminal
	alloc2.ClientStatus = structs.AllocClientStatusComplete
	err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err = store.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core = NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc = s1.coreJobEval(structs.CoreJobJobGC, 2000)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should not still exist
	out, err = store.JobByID(ws, job.Namespace, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}

	outA, err = store.AllocByID(ws, alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA != nil {
		t.Fatalf("bad: %v", outA)
	}

	outA2, err = store.AllocByID(ws, alloc2.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA2 != nil {
		t.Fatalf("bad: %v", outA2)
	}
}
2016-06-27 22:47:49 +00:00
|
|
|
// This test ensures that batch jobs are GC'd in one shot, meaning it all
|
|
|
|
// allocs/evals and job or nothing
|
|
|
|
func TestCoreScheduler_JobGC_OneShot(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-06-27 22:47:49 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
2016-08-11 21:36:22 +00:00
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
2016-06-27 22:47:49 +00:00
|
|
|
// Insert job.
|
2022-04-01 19:17:58 +00:00
|
|
|
store := s1.fsm.State()
|
2016-06-27 22:47:49 +00:00
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert two complete evals
|
|
|
|
eval := mock.Eval()
|
|
|
|
eval.JobID = job.ID
|
|
|
|
eval.Status = structs.EvalStatusComplete
|
|
|
|
|
|
|
|
eval2 := mock.Eval()
|
|
|
|
eval2.JobID = job.ID
|
|
|
|
eval2.Status = structs.EvalStatusComplete
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2})
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert one complete alloc and one running on distinct evals
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.EvalID = eval.ID
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
|
|
|
|
|
|
|
alloc2 := mock.Alloc()
|
|
|
|
alloc2.JobID = job.ID
|
|
|
|
alloc2.EvalID = eval2.ID
|
|
|
|
alloc2.DesiredStatus = structs.AllocDesiredStatusRun
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc, alloc2})
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Force the job's state to dead
|
|
|
|
job.Status = structs.JobStatusDead
|
|
|
|
|
|
|
|
// Backdate the time table so index 2000 appears older than the job GC threshold
|
|
|
|
tt := s1.fsm.TimeTable()
|
|
|
|
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should still exist
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.JobByID(ws, job.Namespace, job.ID)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outE, err := store.EvalByID(ws, eval.ID)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outE == nil {
|
|
|
|
t.Fatalf("bad: %v", outE)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outE2, err := store.EvalByID(ws, eval2.ID)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outE2 == nil {
|
|
|
|
t.Fatalf("bad: %v", outE2)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outA, err := store.AllocByID(ws, alloc.ID)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outA == nil {
|
|
|
|
t.Fatalf("bad: %v", outA)
|
|
|
|
}
|
2022-04-01 19:17:58 +00:00
|
|
|
outA2, err := store.AllocByID(ws, alloc2.ID)
|
2016-06-27 22:47:49 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outA2 == nil {
|
|
|
|
t.Fatalf("bad: %v", outA2)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-15 23:47:19 +00:00
|
|
|
// This test ensures that stopped jobs are GCd
|
|
|
|
func TestCoreScheduler_JobGC_Stopped(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-04-15 23:47:19 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Insert job.
|
2022-04-01 19:17:58 +00:00
|
|
|
store := s1.fsm.State()
|
2017-04-15 23:47:19 +00:00
|
|
|
job := mock.Job()
|
|
|
|
job.Stop = true
|
2018-01-22 22:31:38 +00:00
|
|
|
job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{
|
|
|
|
Attempts: 0,
|
|
|
|
Interval: 0 * time.Second,
|
|
|
|
}
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert two complete evals
|
|
|
|
eval := mock.Eval()
|
|
|
|
eval.JobID = job.ID
|
|
|
|
eval.Status = structs.EvalStatusComplete
|
|
|
|
|
|
|
|
eval2 := mock.Eval()
|
|
|
|
eval2.JobID = job.ID
|
|
|
|
eval2.Status = structs.EvalStatusComplete
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval, eval2})
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert one complete alloc
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.JobID = job.ID
|
|
|
|
alloc.EvalID = eval.ID
|
|
|
|
alloc.DesiredStatus = structs.AllocDesiredStatusStop
|
2018-01-22 22:31:38 +00:00
|
|
|
alloc.TaskGroup = job.TaskGroups[0].Name
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertAllocs(structs.MsgTypeTestSetup, 1002, []*structs.Allocation{alloc})
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Backdate the time table so index 2000 appears older than the job GC threshold
|
|
|
|
tt := s1.fsm.TimeTable()
|
|
|
|
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold))
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := s1.coreJobEval(structs.CoreJobJobGC, 2000)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shouldn't still exist
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.JobByID(ws, job.Namespace, job.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out != nil {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outE, err := store.EvalByID(ws, eval.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outE != nil {
|
|
|
|
t.Fatalf("bad: %v", outE)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outE2, err := store.EvalByID(ws, eval2.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outE2 != nil {
|
|
|
|
t.Fatalf("bad: %v", outE2)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outA, err := store.AllocByID(ws, alloc.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outA != nil {
|
|
|
|
t.Fatalf("bad: %v", outA)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-20 23:50:41 +00:00
|
|
|
func TestCoreScheduler_JobGC_Force(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2017-10-23 22:04:00 +00:00
|
|
|
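// Exercise the forced job GC against both a plain server and an ACL-enabled
// server.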
for _, withAcl := range []bool{false, true} {
|
|
|
|
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
|
|
|
|
var server *Server
|
2019-12-04 00:15:11 +00:00
|
|
|
var cleanup func()
|
2017-10-23 22:04:00 +00:00
|
|
|
if withAcl {
|
2019-12-04 00:15:11 +00:00
|
|
|
server, _, cleanup = TestACLServer(t, nil)
|
2017-10-23 22:04:00 +00:00
|
|
|
} else {
|
2019-12-04 00:15:11 +00:00
|
|
|
server, cleanup = TestServer(t, nil)
|
2017-10-23 22:04:00 +00:00
|
|
|
}
|
2019-12-04 00:15:11 +00:00
|
|
|
defer cleanup()
|
2017-10-23 22:04:00 +00:00
|
|
|
testutil.WaitForLeader(t, server.RPC)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Insert job.
|
2022-04-01 19:17:58 +00:00
|
|
|
store := server.fsm.State()
|
2017-10-23 22:04:00 +00:00
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.Status = structs.JobStatusDead
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
|
2017-10-23 22:04:00 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Insert a terminal eval
|
|
|
|
eval := mock.Eval()
|
|
|
|
eval.JobID = job.ID
|
|
|
|
eval.Status = structs.EvalStatusComplete
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertEvals(structs.MsgTypeTestSetup, 1001, []*structs.Evaluation{eval})
|
2017-10-23 22:04:00 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-10-23 22:04:00 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(server, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := server.coreJobEval(structs.CoreJobForceGC, 1002)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shouldn't still exist
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.JobByID(ws, job.Namespace, job.ID)
|
2017-10-23 22:04:00 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out != nil {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
outE, err := store.EvalByID(ws, eval.ID)
|
2017-10-23 22:04:00 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if outE != nil {
|
|
|
|
t.Fatalf("bad: %v", outE)
|
|
|
|
}
|
|
|
|
})
|
2016-02-20 23:50:41 +00:00
|
|
|
}
|
|
|
|
}
|
2016-03-30 22:17:13 +00:00
|
|
|
|
2017-04-15 23:47:19 +00:00
|
|
|
// This test ensures parameterized jobs only get gc'd when stopped
|
|
|
|
func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-01-26 19:57:32 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Insert a parameterized job.
|
2022-04-01 19:17:58 +00:00
|
|
|
store := s1.fsm.State()
|
2017-01-26 19:57:32 +00:00
|
|
|
job := mock.Job()
|
|
|
|
job.Type = structs.JobTypeBatch
|
|
|
|
job.Status = structs.JobStatusRunning
|
|
|
|
job.ParameterizedJob = &structs.ParameterizedJobConfig{
|
|
|
|
Payload: structs.DispatchPayloadRequired,
|
|
|
|
}
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
|
2017-01-26 19:57:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-04-15 23:47:19 +00:00
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should still exist
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.JobByID(ws, job.Namespace, job.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Mark the job as stopped and try again
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.Stop = true
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, job2)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err = store.Snapshot()
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core = NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should not exist
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err = store.JobByID(ws, job.Namespace, job.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out != nil {
|
|
|
|
t.Fatalf("bad: %+v", out)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-11 17:36:28 +00:00
|
|
|
// This test ensures periodic jobs don't get GCd until they are stopped
|
2017-04-15 23:47:19 +00:00
|
|
|
func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2017-04-15 23:47:19 +00:00
|
|
|
|
2019-12-04 00:15:11 +00:00
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-04-15 23:47:19 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Insert a periodic job.
|
2022-04-01 19:17:58 +00:00
|
|
|
store := s1.fsm.State()
|
2017-04-15 23:47:19 +00:00
|
|
|
job := mock.PeriodicJob()
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertJob(structs.MsgTypeTestSetup, 1000, job)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
2017-01-26 19:57:32 +00:00
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-01-26 19:57:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should still exist
|
2017-02-08 05:22:48 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.JobByID(ws, job.Namespace, job.ID)
|
2017-01-26 19:57:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out == nil {
|
|
|
|
t.Fatalf("bad: %v", out)
|
|
|
|
}
|
|
|
|
|
2017-04-15 23:47:19 +00:00
|
|
|
// Mark the job as stopped and try again
|
|
|
|
job2 := job.Copy()
|
|
|
|
job2.Stop = true
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertJob(structs.MsgTypeTestSetup, 2000, job2)
|
2017-01-26 19:57:32 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
2017-04-15 23:47:19 +00:00
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err = store.Snapshot()
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core = NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
|
|
|
|
err = core.Process(gc)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Should not exist
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err = store.JobByID(ws, job.Namespace, job.ID)
|
2017-04-15 23:47:19 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
if out != nil {
|
|
|
|
t.Fatalf("bad: %+v", out)
|
2017-01-26 19:57:32 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-29 19:32:37 +00:00
|
|
|
func TestCoreScheduler_DeploymentGC(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-06-29 19:32:37 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
2018-03-11 17:57:49 +00:00
|
|
|
// Insert an active deployment, a terminal deployment, and a terminal deployment that still has allocations
|
2022-04-01 19:17:58 +00:00
|
|
|
store := s1.fsm.State()
|
2017-07-14 20:02:39 +00:00
|
|
|
d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment()
|
2017-06-29 19:32:37 +00:00
|
|
|
d1.Status = structs.DeploymentStatusFailed
|
2017-07-14 20:02:39 +00:00
|
|
|
d3.Status = structs.DeploymentStatusSuccessful
|
2022-04-01 19:17:58 +00:00
|
|
|
assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment")
|
|
|
|
assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment")
|
|
|
|
assert.Nil(store.UpsertDeployment(1002, d3), "UpsertDeployment")
|
2017-07-14 20:02:39 +00:00
|
|
|
|
|
|
|
a := mock.Alloc()
|
|
|
|
a.JobID = d3.JobID
|
|
|
|
a.DeploymentID = d3.ID
|
2022-04-01 19:17:58 +00:00
|
|
|
assert.Nil(store.UpsertAllocs(structs.MsgTypeTestSetup, 1003, []*structs.Allocation{a}), "UpsertAllocs")
|
2017-06-29 19:32:37 +00:00
|
|
|
|
|
|
|
// Backdate the time table so index 2000 appears older than the deployment GC threshold
|
|
|
|
tt := s1.fsm.TimeTable()
|
|
|
|
tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.DeploymentGCThreshold))
|
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-06-29 19:32:37 +00:00
|
|
|
assert.Nil(err, "Snapshot")
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := s1.coreJobEval(structs.CoreJobDeploymentGC, 2000)
|
|
|
|
assert.Nil(core.Process(gc), "Process GC")
|
|
|
|
|
|
|
|
// Should be gone
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.DeploymentByID(ws, d1.ID)
|
2017-06-29 19:32:37 +00:00
|
|
|
assert.Nil(err, "DeploymentByID")
|
|
|
|
assert.Nil(out, "Terminal Deployment")
|
2022-04-01 19:17:58 +00:00
|
|
|
out2, err := store.DeploymentByID(ws, d2.ID)
|
2017-06-29 19:32:37 +00:00
|
|
|
assert.Nil(err, "DeploymentByID")
|
|
|
|
assert.NotNil(out2, "Active Deployment")
|
2022-04-01 19:17:58 +00:00
|
|
|
out3, err := store.DeploymentByID(ws, d3.ID)
|
2017-07-14 20:02:39 +00:00
|
|
|
assert.Nil(err, "DeploymentByID")
|
|
|
|
assert.NotNil(out3, "Terminal Deployment With Allocs")
|
2017-06-29 19:32:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestCoreScheduler_DeploymentGC_Force(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2017-10-23 22:04:00 +00:00
|
|
|
for _, withAcl := range []bool{false, true} {
|
|
|
|
t.Run(fmt.Sprintf("with acl %v", withAcl), func(t *testing.T) {
|
|
|
|
var server *Server
|
2019-12-04 00:15:11 +00:00
|
|
|
var cleanup func()
|
2017-10-23 22:04:00 +00:00
|
|
|
if withAcl {
|
2019-12-04 00:15:11 +00:00
|
|
|
server, _, cleanup = TestACLServer(t, nil)
|
2017-10-23 22:04:00 +00:00
|
|
|
} else {
|
2019-12-04 00:15:11 +00:00
|
|
|
server, cleanup = TestServer(t, nil)
|
2017-10-23 22:04:00 +00:00
|
|
|
}
|
2019-12-04 00:15:11 +00:00
|
|
|
defer cleanup()
|
2017-10-23 22:04:00 +00:00
|
|
|
testutil.WaitForLeader(t, server.RPC)
|
|
|
|
assert := assert.New(t)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
server.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Insert terminal and active deployment
|
2022-04-01 19:17:58 +00:00
|
|
|
store := server.fsm.State()
|
2017-10-23 22:04:00 +00:00
|
|
|
d1, d2 := mock.Deployment(), mock.Deployment()
|
|
|
|
d1.Status = structs.DeploymentStatusFailed
|
2022-04-01 19:17:58 +00:00
|
|
|
assert.Nil(store.UpsertDeployment(1000, d1), "UpsertDeployment")
|
|
|
|
assert.Nil(store.UpsertDeployment(1001, d2), "UpsertDeployment")
|
2017-10-23 22:04:00 +00:00
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
2017-10-23 22:04:00 +00:00
|
|
|
assert.Nil(err, "Snapshot")
|
|
|
|
core := NewCoreScheduler(server, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
gc := server.coreJobEval(structs.CoreJobForceGC, 1000)
|
|
|
|
assert.Nil(core.Process(gc), "Process Force GC")
|
|
|
|
|
|
|
|
// Should be gone
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
out, err := store.DeploymentByID(ws, d1.ID)
|
2017-10-23 22:04:00 +00:00
|
|
|
assert.Nil(err, "DeploymentByID")
|
|
|
|
assert.Nil(out, "Terminal Deployment")
|
2022-04-01 19:17:58 +00:00
|
|
|
out2, err := store.DeploymentByID(ws, d2.ID)
|
2017-10-23 22:04:00 +00:00
|
|
|
assert.Nil(err, "DeploymentByID")
|
|
|
|
assert.NotNil(out2, "Active Deployment")
|
|
|
|
})
|
|
|
|
}
|
2017-06-29 19:32:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestCoreScheduler_PartitionEvalReap(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2016-03-30 22:17:13 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
2016-08-11 21:36:22 +00:00
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
2016-03-30 22:17:13 +00:00
|
|
|
// Create a core scheduler
|
|
|
|
snap, err := s1.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
evals := []string{"a", "b", "c"}
|
|
|
|
allocs := []string{"1", "2", "3"}
|
2023-02-10 14:26:00 +00:00
|
|
|
|
|
|
|
// Set the max ids per reap to something lower.
|
|
|
|
requests := core.(*CoreScheduler).partitionEvalReap(evals, allocs, 2)
|
2016-03-30 22:17:13 +00:00
|
|
|
if len(requests) != 3 {
|
|
|
|
t.Fatalf("Expected 3 requests got: %v", requests)
|
|
|
|
}
|
|
|
|
|
|
|
|
first := requests[0]
|
2016-04-14 18:41:04 +00:00
|
|
|
if len(first.Allocs) != 2 || len(first.Evals) != 0 {
|
2016-03-30 22:17:13 +00:00
|
|
|
t.Fatalf("Unexpected first request: %v", first)
|
|
|
|
}
|
|
|
|
|
|
|
|
second := requests[1]
|
2016-04-14 18:41:04 +00:00
|
|
|
if len(second.Allocs) != 1 || len(second.Evals) != 1 {
|
2016-03-30 22:17:13 +00:00
|
|
|
t.Fatalf("Unexpected second request: %v", second)
|
|
|
|
}
|
|
|
|
|
|
|
|
third := requests[2]
|
2016-04-14 18:41:04 +00:00
|
|
|
if len(third.Allocs) != 0 || len(third.Evals) != 2 {
|
2016-03-30 22:17:13 +00:00
|
|
|
t.Fatalf("Unexpected third request: %v", third)
|
|
|
|
}
|
|
|
|
}
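// The assertions in TestCoreScheduler_PartitionEvalReap above rely on the
// reaper packing alloc IDs into requests first and eval IDs after them, with
// at most maxIDs identifiers per request. A minimal, hypothetical sketch of
// that packing order (this is NOT Nomad's partitionEvalReap implementation;
// evalReapChunk is an illustrative stand-in for the real reap request type):
type evalReapChunk struct {
	Evals  []string
	Allocs []string
}

func partitionReapSketch(evals, allocs []string, maxIDs int) []evalReapChunk {
	// Concatenate alloc IDs first, then eval IDs, and slice into chunks.
	ids := append(append([]string{}, allocs...), evals...)
	var out []evalReapChunk
	for i := 0; i < len(ids); i += maxIDs {
		end := i + maxIDs
		if end > len(ids) {
			end = len(ids)
		}
		chunk := evalReapChunk{}
		for j := i; j < end; j++ {
			if j < len(allocs) {
				chunk.Allocs = append(chunk.Allocs, ids[j])
			} else {
				chunk.Evals = append(chunk.Evals, ids[j])
			}
		}
		out = append(out, chunk)
	}
	return out
}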
|
2017-06-29 19:32:37 +00:00
|
|
|
|
|
|
|
func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2017-06-29 19:32:37 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
|
|
|
|
s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
// Create a core scheduler
|
|
|
|
snap, err := s1.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
|
|
|
|
|
|
|
deployments := []string{"a", "b", "c"}
|
2023-02-10 14:26:00 +00:00
|
|
|
// Set the max ids per reap to something lower.
|
|
|
|
requests := core.(*CoreScheduler).partitionDeploymentReap(deployments, 2)
|
2017-06-29 19:32:37 +00:00
|
|
|
if len(requests) != 2 {
|
|
|
|
t.Fatalf("Expected 2 requests got: %v", requests)
|
|
|
|
}
|
|
|
|
|
|
|
|
first := requests[0]
|
|
|
|
if len(first.Deployments) != 2 {
|
|
|
|
t.Fatalf("Unexpected first request: %v", first)
|
|
|
|
}
|
|
|
|
|
|
|
|
second := requests[1]
|
|
|
|
if len(second.Deployments) != 1 {
|
|
|
|
t.Fatalf("Unexpected second request: %v", second)
|
|
|
|
}
|
|
|
|
}
|
2018-01-30 15:12:14 +00:00
|
|
|
|
2018-03-14 23:06:37 +00:00
|
|
|
func TestCoreScheduler_PartitionJobReap(t *testing.T) {
|
2023-02-10 14:26:00 +00:00
|
|
|
ci.Parallel(t)
|
2019-12-04 00:15:11 +00:00
|
|
|
|
|
|
|
s1, cleanupS1 := TestServer(t, nil)
|
|
|
|
defer cleanupS1()
|
2018-03-14 23:06:37 +00:00
|
|
|
testutil.WaitForLeader(t, s1.RPC)
|
|
|
|
|
|
|
|
// Create a core scheduler
|
|
|
|
snap, err := s1.fsm.State().Snapshot()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("err: %v", err)
|
|
|
|
}
|
|
|
|
core := NewCoreScheduler(s1, snap)
|
2023-02-10 14:26:00 +00:00
|
|
|
jobs := []*structs.Job{mock.Job(), mock.Job(), mock.Job()}
|
2018-03-14 23:06:37 +00:00
|
|
|
|
|
|
|
// Set the max ids per reap to something lower.
|
2023-02-10 14:26:00 +00:00
|
|
|
requests := core.(*CoreScheduler).partitionJobReap(jobs, "", 2)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.Len(t, requests, 2)
|
2018-03-14 23:06:37 +00:00
|
|
|
|
|
|
|
first := requests[0]
|
|
|
|
second := requests[1]
|
2022-04-01 19:17:58 +00:00
|
|
|
require.Len(t, first.Jobs, 2)
|
|
|
|
require.Len(t, second.Jobs, 1)
|
2018-03-14 23:06:37 +00:00
|
|
|
}
|
|
|
|
|
2018-01-30 15:12:14 +00:00
|
|
|
// Tests various scenarios when allocations are eligible to be GCed
|
|
|
|
func TestAllocation_GCEligible(t *testing.T) {
|
|
|
|
type testCase struct {
|
2018-11-01 05:02:26 +00:00
|
|
|
Desc string
|
|
|
|
GCTime time.Time
|
|
|
|
ClientStatus string
|
|
|
|
DesiredStatus string
|
|
|
|
JobStatus string
|
|
|
|
JobStop bool
|
|
|
|
AllocJobModifyIndex uint64
|
|
|
|
JobModifyIndex uint64
|
|
|
|
ModifyIndex uint64
|
|
|
|
NextAllocID string
|
|
|
|
ReschedulePolicy *structs.ReschedulePolicy
|
|
|
|
RescheduleTrackers []*structs.RescheduleEvent
|
|
|
|
ThresholdIndex uint64
|
|
|
|
ShouldGC bool
|
2018-01-30 15:12:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
fail := time.Now()
|
|
|
|
|
|
|
|
harness := []testCase{
|
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when non terminal",
|
2018-01-30 15:12:14 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusPending,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
2018-01-30 22:14:53 +00:00
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when non terminal and job stopped",
|
2018-01-30 22:14:53 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusPending,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
JobStop: true,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when non terminal and job dead",
|
2018-01-30 22:14:53 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusPending,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
JobStatus: structs.JobStatusDead,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
2018-12-05 21:01:12 +00:00
|
|
|
{
|
|
|
|
Desc: "Don't GC when non terminal on client and job dead",
|
|
|
|
ClientStatus: structs.AllocClientStatusRunning,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusStop,
|
|
|
|
JobStatus: structs.JobStatusDead,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
2018-01-30 15:12:14 +00:00
|
|
|
{
|
2018-04-11 18:58:02 +00:00
|
|
|
Desc: "GC when terminal but not failed",
|
|
|
|
ClientStatus: structs.AllocClientStatusComplete,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-04-11 20:12:23 +00:00
|
|
|
ModifyIndex: 90,
|
2018-04-11 18:58:02 +00:00
|
|
|
ThresholdIndex: 90,
|
|
|
|
ReschedulePolicy: nil,
|
2018-04-11 20:12:23 +00:00
|
|
|
ShouldGC: true,
|
2018-04-11 18:58:02 +00:00
|
|
|
},
|
2018-01-30 15:12:14 +00:00
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when threshold not met",
|
2018-01-30 15:12:14 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusComplete,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusStop,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 100,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ReschedulePolicy: nil,
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC when no reschedule policy",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
|
|
|
ReschedulePolicy: nil,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC when empty policy",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 0, Interval: 0 * time.Minute},
|
2018-01-30 15:12:14 +00:00
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when no previous reschedule attempts",
|
2018-01-30 15:12:14 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
|
2018-01-30 15:12:14 +00:00
|
|
|
ShouldGC: false,
|
|
|
|
},
|
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when prev reschedule attempt within interval",
|
2018-01-30 15:12:14 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 2, Interval: 30 * time.Minute},
|
2018-01-30 15:12:14 +00:00
|
|
|
GCTime: fail,
|
|
|
|
ModifyIndex: 90,
|
|
|
|
ThresholdIndex: 90,
|
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-5 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
ShouldGC: false,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC with prev reschedule attempt outside interval",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute},
|
2018-01-30 15:12:14 +00:00
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-45 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-60 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC when next alloc id is set",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute},
|
2018-01-30 15:12:14 +00:00
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
NextAllocID: uuid.Generate(),
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
2018-04-11 18:58:02 +00:00
|
|
|
{
|
2018-04-11 20:12:23 +00:00
|
|
|
Desc: "Don't GC when next alloc id is not set and unlimited reschedules",
|
2018-04-11 18:58:02 +00:00
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"},
|
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
ShouldGC: false,
|
2018-01-30 15:12:14 +00:00
|
|
|
},
|
2018-01-30 22:14:53 +00:00
|
|
|
{
|
|
|
|
Desc: "GC when job is stopped",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute},
|
2018-01-30 22:14:53 +00:00
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
JobStop: true,
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC when job status is dead",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusRun,
|
|
|
|
GCTime: fail,
|
2018-02-28 18:21:27 +00:00
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute},
|
2018-01-30 22:14:53 +00:00
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
JobStatus: structs.JobStatusDead,
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
2018-05-21 18:28:31 +00:00
|
|
|
{
|
|
|
|
Desc: "GC when desired status is stop, unlimited reschedule policy, no previous reschedule events",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusStop,
|
|
|
|
GCTime: fail,
|
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Unlimited: true, Delay: 5 * time.Second, DelayFunction: "constant"},
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
Desc: "GC when desired status is stop, limited reschedule policy, some previous reschedule events",
|
|
|
|
ClientStatus: structs.AllocClientStatusFailed,
|
|
|
|
DesiredStatus: structs.AllocDesiredStatusStop,
|
|
|
|
GCTime: fail,
|
|
|
|
ReschedulePolicy: &structs.ReschedulePolicy{Attempts: 5, Interval: 30 * time.Minute},
|
|
|
|
RescheduleTrackers: []*structs.RescheduleEvent{
|
|
|
|
{
|
|
|
|
RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
|
|
|
|
},
|
|
|
|
},
|
|
|
|
ShouldGC: true,
|
|
|
|
},
|
2018-01-30 15:12:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, tc := range harness {
|
|
|
|
alloc := &structs.Allocation{}
|
|
|
|
alloc.ModifyIndex = tc.ModifyIndex
|
|
|
|
alloc.DesiredStatus = tc.DesiredStatus
|
|
|
|
alloc.ClientStatus = tc.ClientStatus
|
2018-02-28 18:21:27 +00:00
|
|
|
alloc.RescheduleTracker = &structs.RescheduleTracker{Events: tc.RescheduleTrackers}
|
2018-01-30 22:14:53 +00:00
|
|
|
alloc.NextAllocation = tc.NextAllocID
|
|
|
|
job := mock.Job()
|
|
|
|
alloc.TaskGroup = job.TaskGroups[0].Name
|
|
|
|
job.TaskGroups[0].ReschedulePolicy = tc.ReschedulePolicy
|
|
|
|
if tc.JobStatus != "" {
|
|
|
|
job.Status = tc.JobStatus
|
|
|
|
}
|
|
|
|
job.Stop = tc.JobStop
|
2018-01-30 15:12:14 +00:00
|
|
|
|
|
|
|
t.Run(tc.Desc, func(t *testing.T) {
|
2018-01-30 22:14:53 +00:00
|
|
|
if got := allocGCEligible(alloc, job, tc.GCTime, tc.ThresholdIndex); got != tc.ShouldGC {
|
2018-01-30 15:12:14 +00:00
|
|
|
t.Fatalf("expected %v but got %v", tc.ShouldGC, got)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
|
|
|
|
}
|
2018-01-30 22:14:53 +00:00
|
|
|
|
|
|
|
// Verify nil job
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusComplete
|
2022-04-01 19:17:58 +00:00
|
|
|
require.True(t, allocGCEligible(alloc, nil, time.Now(), 1000))
|
2018-01-30 15:12:14 +00:00
|
|
|
}
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
func TestCoreScheduler_CSIPluginGC(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
srv, cleanupSRV := TestServer(t, nil)
|
|
|
|
defer cleanupSRV()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
srv.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
|
|
|
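// Create the test CSI plugin "foo" in state; deleteNodes removes the client
// nodes backing it when the test finishes.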
deleteNodes := state.CreateTestCSIPlugin(srv.fsm.State(), "foo")
|
|
|
|
defer deleteNodes()
|
2022-04-01 19:17:58 +00:00
|
|
|
store := srv.fsm.State()
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Backdate the time table so this index appears older than the CSI plugin GC threshold
|
|
|
|
tt := srv.fsm.TimeTable()
|
|
|
|
index := uint64(2000)
|
2020-05-11 12:20:50 +00:00
|
|
|
tt.Witness(index, time.Now().UTC().Add(-1*srv.config.CSIPluginGCThreshold))
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Create a core scheduler
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
|
|
|
require.NoError(t, err)
|
2020-05-06 20:49:12 +00:00
|
|
|
core := NewCoreScheduler(srv, snap)
|
|
|
|
|
|
|
|
// Attempt the GC
|
|
|
|
index++
|
|
|
|
gc := srv.coreJobEval(structs.CoreJobCSIPluginGC, index)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, core.Process(gc))
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Should not be gone (plugin in use)
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-04-01 19:17:58 +00:00
|
|
|
plug, err := store.CSIPluginByID(ws, "foo")
|
|
|
|
require.NotNil(t, plug)
|
|
|
|
require.NoError(t, err)
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Empty the plugin's controller and node lists so it becomes eligible for GC
|
2023-02-27 13:47:08 +00:00
|
|
|
plug = plug.Copy()
|
2020-05-06 20:49:12 +00:00
|
|
|
plug.Controllers = map[string]*structs.CSIInfo{}
|
|
|
|
plug.Nodes = map[string]*structs.CSIInfo{}
|
|
|
|
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertCSIPlugin(index, plug)
|
|
|
|
require.NoError(t, err)
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Retry
|
|
|
|
index++
|
|
|
|
gc = srv.coreJobEval(structs.CoreJobCSIPluginGC, index)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, core.Process(gc))
|
2020-05-06 20:49:12 +00:00
|
|
|
|
|
|
|
// Should be gone
|
2022-04-01 19:17:58 +00:00
|
|
|
plug, err = store.CSIPluginByID(ws, "foo")
|
|
|
|
require.Nil(t, plug)
|
|
|
|
require.NoError(t, err)
|
2020-05-06 20:49:12 +00:00
|
|
|
}
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
func TestCoreScheduler_CSIVolumeClaimGC(t *testing.T) {
|
|
|
|
srv, shutdown := TestServer(t, func(c *Config) {
|
|
|
|
c.NumSchedulers = 0 // Prevent automatic dequeue
|
|
|
|
})
|
|
|
|
|
|
|
|
defer shutdown()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
codec := rpcClient(t, srv)
|
|
|
|
|
|
|
|
index := uint64(1)
|
|
|
|
volID := uuid.Generate()
|
|
|
|
ns := structs.DefaultNamespace
|
|
|
|
pluginID := "foo"
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
store := srv.fsm.State()
|
2020-05-11 12:20:50 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
index, _ = store.LatestIndex()
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
// Create client node and plugin
|
|
|
|
node := mock.Node()
|
|
|
|
node.Attributes["nomad.version"] = "0.11.0" // needs client RPCs
|
|
|
|
node.CSINodePlugins = map[string]*structs.CSIInfo{
|
|
|
|
pluginID: {
|
|
|
|
PluginID: pluginID,
|
|
|
|
Healthy: true,
|
|
|
|
NodeInfo: &structs.CSINodeInfo{},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
err := store.UpsertNode(structs.MsgTypeTestSetup, index, node)
|
|
|
|
require.NoError(t, err)
|
2020-05-11 12:20:50 +00:00
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
// *Important*: for volume writes in this test we must use RPCs
|
|
|
|
// rather than StateStore methods directly, or the blocking query
|
|
|
|
// in volumewatcher won't get the final update for GC because it's
|
|
|
|
// watching on a different store at that point
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
// Register a volume
|
|
|
|
vols := []*structs.CSIVolume{{
|
2021-04-08 14:42:19 +00:00
|
|
|
ID: volID,
|
|
|
|
Namespace: ns,
|
|
|
|
PluginID: pluginID,
|
|
|
|
Topologies: []*structs.CSITopology{},
|
|
|
|
RequestedCapabilities: []*structs.CSIVolumeCapability{{
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeSingleWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
}},
|
2020-05-11 12:20:50 +00:00
|
|
|
}}
|
|
|
|
volReq := &structs.CSIVolumeRegisterRequest{Volumes: vols}
|
|
|
|
volReq.Namespace = ns
|
|
|
|
volReq.Region = srv.config.Region
|
|
|
|
|
|
|
|
err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Register",
|
|
|
|
volReq, &structs.CSIVolumeRegisterResponse{})
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
2020-05-11 12:20:50 +00:00
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
// Create a job with two allocs that claim the volume.
|
2020-05-11 12:20:50 +00:00
|
|
|
// We use two allocs here, one of which is not running, so
|
2022-04-04 14:46:45 +00:00
|
|
|
// that we can assert the volumewatcher has made one
|
|
|
|
// complete pass (and removed the 2nd alloc) before we
|
|
|
|
// run the GC
|
2020-05-11 12:20:50 +00:00
|
|
|
eval := mock.Eval()
|
|
|
|
eval.Status = structs.EvalStatusFailed
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
store.UpsertJobSummary(index, mock.JobSummary(eval.JobID))
|
2020-05-11 12:20:50 +00:00
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertEvals(structs.MsgTypeTestSetup, index, []*structs.Evaluation{eval})
|
|
|
|
require.Nil(t, err)
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
job := mock.Job()
|
|
|
|
job.ID = eval.JobID
|
|
|
|
job.Status = structs.JobStatusRunning
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.UpsertJob(structs.MsgTypeTestSetup, index, job)
|
|
|
|
require.NoError(t, err)
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
alloc1, alloc2 := mock.Alloc(), mock.Alloc()
|
|
|
|
alloc1.NodeID = node.ID
|
|
|
|
alloc1.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
alloc1.Job = job
|
|
|
|
alloc1.JobID = job.ID
|
|
|
|
alloc1.EvalID = eval.ID
|
|
|
|
|
|
|
|
alloc2.NodeID = node.ID
|
|
|
|
alloc2.ClientStatus = structs.AllocClientStatusComplete
|
2022-04-04 14:46:45 +00:00
|
|
|
alloc2.DesiredStatus = structs.AllocDesiredStatusStop
|
2020-05-11 12:20:50 +00:00
|
|
|
alloc2.Job = job
|
|
|
|
alloc2.JobID = job.ID
|
|
|
|
alloc2.EvalID = eval.ID
|
|
|
|
|
|
|
|
summary := mock.JobSummary(alloc1.JobID)
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, store.UpsertJobSummary(index, summary))
|
2020-05-11 12:20:50 +00:00
|
|
|
summary = mock.JobSummary(alloc2.JobID)
|
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, store.UpsertJobSummary(index, summary))
|
2020-05-11 12:20:50 +00:00
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, store.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc1, alloc2}))
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
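// Claim the volume for the running alloc over RPC, then flip the second
// alloc's claim to unpublishing so the volumewatcher releases it on its own.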
req := &structs.CSIVolumeClaimRequest{
|
2022-04-04 14:46:45 +00:00
|
|
|
VolumeID: volID,
|
|
|
|
AllocationID: alloc1.ID,
|
|
|
|
NodeID: uuid.Generate(), // doesn't exist so we don't get errors trying to unmount volumes from it
|
|
|
|
Claim: structs.CSIVolumeClaimWrite,
|
|
|
|
AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter,
|
|
|
|
AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
|
|
|
|
State: structs.CSIVolumeClaimStateTaken,
|
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Namespace: ns,
|
|
|
|
Region: srv.config.Region,
|
|
|
|
},
|
2020-05-11 12:20:50 +00:00
|
|
|
}
|
|
|
|
err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim",
|
|
|
|
req, &structs.CSIVolumeClaimResponse{})
|
2022-04-04 14:46:45 +00:00
|
|
|
require.NoError(t, err, "write claim should succeed")
|
|
|
|
|
|
|
|
req.AllocationID = alloc2.ID
|
|
|
|
req.State = structs.CSIVolumeClaimStateUnpublishing
|
|
|
|
|
|
|
|
err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Claim",
|
|
|
|
req, &structs.CSIVolumeClaimResponse{})
|
|
|
|
require.NoError(t, err, "unpublishing claim should succeed")
|
2020-05-11 12:20:50 +00:00
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
vol, err := store.CSIVolumeByID(ws, ns, volID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
return len(vol.WriteClaims) == 1 &&
|
|
|
|
len(vol.WriteAllocs) == 1 &&
|
|
|
|
len(vol.PastClaims) == 0
|
|
|
|
}, time.Second*1, 100*time.Millisecond,
|
|
|
|
"volumewatcher should have released unpublishing claim without GC")
|
|
|
|
|
|
|
|
// At this point we can guarantee that volumewatcher is waiting
|
|
|
|
// for new work. Delete allocation and job so that the next pass
|
|
|
|
// through volumewatcher has more work to do
|
|
|
|
index, _ = store.LatestIndex()
|
2020-05-11 12:20:50 +00:00
|
|
|
index++
|
2022-04-01 19:17:58 +00:00
|
|
|
err = store.DeleteJob(index, ns, job.ID)
|
|
|
|
require.NoError(t, err)
|
2022-04-04 14:46:45 +00:00
|
|
|
index, _ = store.LatestIndex()
|
2020-05-11 12:20:50 +00:00
|
|
|
index++
|
2022-07-06 14:30:11 +00:00
|
|
|
err = store.DeleteEval(index, []string{eval.ID}, []string{alloc1.ID}, false)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
// Create a core scheduler and attempt the volume claim GC
|
2022-04-01 19:17:58 +00:00
|
|
|
snap, err := store.Snapshot()
|
|
|
|
require.NoError(t, err)
|
2022-04-04 14:46:45 +00:00
|
|
|
|
2020-05-11 12:20:50 +00:00
|
|
|
core := NewCoreScheduler(srv, snap)
|
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
index, _ = snap.LatestIndex()
|
2020-05-11 12:20:50 +00:00
|
|
|
index++
|
|
|
|
gc := srv.coreJobEval(structs.CoreJobForceGC, index)
|
|
|
|
c := core.(*CoreScheduler)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, c.csiVolumeClaimGC(gc))
|
2020-05-11 12:20:50 +00:00
|
|
|
|
2022-04-04 14:46:45 +00:00
|
|
|
// the only remaining claim is for a deleted alloc with no path to
|
|
|
|
// the non-existent node, so volumewatcher will release the
|
|
|
|
// remaining claim
|
2022-04-01 19:17:58 +00:00
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
vol, _ := store.CSIVolumeByID(ws, ns, volID)
|
2022-03-29 13:44:00 +00:00
|
|
|
return len(vol.WriteClaims) == 0 &&
|
|
|
|
len(vol.WriteAllocs) == 0 &&
|
|
|
|
len(vol.PastClaims) == 0
|
|
|
|
}, time.Second*2, 10*time.Millisecond, "claims were not released")
|
2020-05-11 12:20:50 +00:00
|
|
|
|
|
|
|
}
|
2020-08-18 20:48:43 +00:00
|
|
|
|
2022-01-27 14:30:03 +00:00
|
|
|
func TestCoreScheduler_CSIBadState_ClaimGC(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2022-01-27 14:30:03 +00:00
|
|
|
|
|
|
|
srv, shutdown := TestServer(t, func(c *Config) {
|
|
|
|
c.NumSchedulers = 0 // Prevent automatic dequeue
|
|
|
|
})
|
|
|
|
|
|
|
|
defer shutdown()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
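// Seed the state store with deliberately inconsistent CSI claim state; the
// claim GC below should mark those stale claims as unpublishing.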
err := state.TestBadCSIState(t, srv.State())
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
2022-01-27 14:30:03 +00:00
|
|
|
|
|
|
|
snap, err := srv.State().Snapshot()
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
2022-01-27 14:30:03 +00:00
|
|
|
core := NewCoreScheduler(srv, snap)
|
|
|
|
|
|
|
|
index, _ := srv.State().LatestIndex()
|
|
|
|
index++
|
|
|
|
gc := srv.coreJobEval(structs.CoreJobForceGC, index)
|
|
|
|
c := core.(*CoreScheduler)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, c.csiVolumeClaimGC(gc))
|
2022-01-27 14:30:03 +00:00
|
|
|
|
2022-04-01 19:17:58 +00:00
|
|
|
require.Eventually(t, func() bool {
|
2022-01-27 14:30:03 +00:00
|
|
|
vol, _ := srv.State().CSIVolumeByID(nil,
|
|
|
|
structs.DefaultNamespace, "csi-volume-nfs0")
|
|
|
|
if len(vol.PastClaims) != 2 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
for _, claim := range vol.PastClaims {
|
|
|
|
if claim.State != structs.CSIVolumeClaimStateUnpublishing {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
2022-09-27 12:43:45 +00:00
|
|
|
}, time.Second*5, 10*time.Millisecond, "invalid claims should be marked for GC")
|
2022-01-27 14:30:03 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2022-06-02 19:48:17 +00:00
|
|
|
// TestCoreScheduler_RootKeyGC exercises root key GC
|
|
|
|
func TestCoreScheduler_RootKeyGC(t *testing.T) {
|
|
|
|
ci.Parallel(t)
|
|
|
|
|
|
|
|
srv, cleanup := TestServer(t, nil)
|
|
|
|
defer cleanup()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
// reset the time table
|
|
|
|
srv.fsm.timetable.table = make([]TimeTableEntry, 1, 10)
|
|
|
|
|
2022-11-01 19:00:50 +00:00
|
|
|
// active key, will never be GC'd
|
2022-06-02 19:48:17 +00:00
|
|
|
store := srv.fsm.State()
|
|
|
|
key0, err := store.GetActiveRootKeyMeta(nil)
|
|
|
|
require.NotNil(t, key0, "expected keyring to be bootstrapped")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2022-11-01 19:00:50 +00:00
|
|
|
// insert an "old" inactive key
|
|
|
|
key1 := structs.NewRootKeyMeta()
|
|
|
|
key1.SetInactive()
|
|
|
|
require.NoError(t, store.UpsertRootKeyMeta(600, key1, false))
|
|
|
|
|
|
|
|
// insert an "old" and inactive key with a variable that's using it
|
|
|
|
key2 := structs.NewRootKeyMeta()
|
|
|
|
key2.SetInactive()
|
|
|
|
require.NoError(t, store.UpsertRootKeyMeta(700, key2, false))
|
|
|
|
|
|
|
|
variable := mock.VariableEncrypted()
|
|
|
|
variable.KeyID = key2.KeyID
|
|
|
|
|
|
|
|
setResp := store.VarSet(601, &structs.VarApplyStateRequest{
|
|
|
|
Op: structs.VarOpSet,
|
|
|
|
Var: variable,
|
|
|
|
})
|
|
|
|
require.NoError(t, setResp.Error)
|
|
|
|
|
|
|
|
// insert an "old" key that's inactive but being used by an alloc
|
|
|
|
key3 := structs.NewRootKeyMeta()
|
|
|
|
key3.SetInactive()
|
|
|
|
require.NoError(t, store.UpsertRootKeyMeta(800, key3, false))
|
|
|
|
|
|
|
|
// insert the allocation using key3
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
alloc.ClientStatus = structs.AllocClientStatusRunning
|
|
|
|
alloc.SigningKeyID = key3.KeyID
|
|
|
|
require.NoError(t, store.UpsertAllocs(
|
|
|
|
structs.MsgTypeTestSetup, 850, []*structs.Allocation{alloc}))
|
|
|
|
|
|
|
|
// insert an "old" key that's inactive but being used by an alloc
|
|
|
|
key4 := structs.NewRootKeyMeta()
|
|
|
|
key4.SetInactive()
|
|
|
|
require.NoError(t, store.UpsertRootKeyMeta(900, key4, false))
|
|
|
|
|
|
|
|
// insert the dead allocation using key4
|
|
|
|
alloc2 := mock.Alloc()
|
|
|
|
alloc2.ClientStatus = structs.AllocClientStatusFailed
|
|
|
|
alloc2.DesiredStatus = structs.AllocDesiredStatusStop
|
|
|
|
alloc2.SigningKeyID = key4.KeyID
|
|
|
|
require.NoError(t, store.UpsertAllocs(
|
|
|
|
structs.MsgTypeTestSetup, 950, []*structs.Allocation{alloc2}))
|
|
|
|
|
|
|
|
// insert a time table index before the last key
|
|
|
|
tt := srv.fsm.TimeTable()
|
|
|
|
tt.Witness(1000, time.Now().UTC().Add(-1*srv.config.RootKeyGCThreshold))
|
|
|
|
|
|
|
|
// insert a "new" but inactive key
|
|
|
|
key5 := structs.NewRootKeyMeta()
|
|
|
|
key5.SetInactive()
|
|
|
|
require.NoError(t, store.UpsertRootKeyMeta(1500, key5, false))
|
|
|
|
|
2022-06-02 19:48:17 +00:00
|
|
|
// run the core job
|
|
|
|
snap, err := store.Snapshot()
|
|
|
|
require.NoError(t, err)
|
|
|
|
core := NewCoreScheduler(srv, snap)
|
2022-06-20 20:26:05 +00:00
|
|
|
eval := srv.coreJobEval(structs.CoreJobRootKeyRotateOrGC, 2000)
|
2022-06-02 19:48:17 +00:00
|
|
|
c := core.(*CoreScheduler)
|
2022-11-01 19:00:50 +00:00
|
|
|
require.NoError(t, c.rootKeyRotateOrGC(eval))
|
2022-06-02 19:48:17 +00:00
|
|
|
|
2022-11-01 19:00:50 +00:00
|
|
|
ws := memdb.NewWatchSet()
|
|
|
|
key, err := store.RootKeyMetaByID(ws, key0.KeyID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, key, "active key should not have been GCd")
|
2022-06-02 19:48:17 +00:00
|
|
|
|
2022-11-01 19:00:50 +00:00
|
|
|
key, err = store.RootKeyMetaByID(ws, key1.KeyID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Nil(t, key, "old and unused inactive key should have been GCd")
|
2022-06-20 20:26:05 +00:00
|
|
|
|
2022-11-01 19:00:50 +00:00
|
|
|
key, err = store.RootKeyMetaByID(ws, key2.KeyID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, key, "old key should not have been GCd if still in use")
|
|
|
|
|
|
|
|
key, err = store.RootKeyMetaByID(ws, key3.KeyID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, key, "old key used to sign a live alloc should not have been GCd")
|
|
|
|
|
|
|
|
key, err = store.RootKeyMetaByID(ws, key4.KeyID)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Nil(t, key, "old key used to sign a terminal alloc should have been GCd")
|
|
|
|
|
|
|
|
key, err = store.RootKeyMetaByID(ws, key5.KeyID)
|
2022-06-20 20:26:05 +00:00
|
|
|
require.NoError(t, err)
|
2022-11-01 19:00:50 +00:00
|
|
|
require.NotNil(t, key, "new key should not have been GCd")
|
|
|
|
|
2022-06-02 19:48:17 +00:00
|
|
|
}
|
|
|
|
|
2022-08-26 18:03:56 +00:00
|
|
|
// TestCoreScheduler_VariablesRekey exercises variables rekeying
|
|
|
|
func TestCoreScheduler_VariablesRekey(t *testing.T) {
|
2022-07-07 17:48:38 +00:00
|
|
|
ci.Parallel(t)
|
|
|
|
|
|
|
|
srv, cleanup := TestServer(t, nil)
|
|
|
|
defer cleanup()
|
|
|
|
testutil.WaitForLeader(t, srv.RPC)
|
|
|
|
|
|
|
|
store := srv.fsm.State()
|
|
|
|
key0, err := store.GetActiveRootKeyMeta(nil)
|
|
|
|
require.NotNil(t, key0, "expected keyring to be bootstrapped")
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2022-08-15 15:19:53 +00:00
|
|
|
for i := 0; i < 3; i++ {
|
2022-08-26 18:03:56 +00:00
|
|
|
req := &structs.VariablesApplyRequest{
|
|
|
|
Op: structs.VarOpSet,
|
|
|
|
Var: mock.Variable(),
|
2022-08-15 15:19:53 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: srv.config.Region},
|
|
|
|
}
|
2022-08-26 18:03:56 +00:00
|
|
|
resp := &structs.VariablesApplyResponse{}
|
|
|
|
require.NoError(t, srv.RPC("Variables.Apply", req, resp))
|
2022-07-07 17:48:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rotateReq := &structs.KeyringRotateRootKeyRequest{
|
|
|
|
WriteRequest: structs.WriteRequest{
|
|
|
|
Region: srv.config.Region,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
var rotateResp structs.KeyringRotateRootKeyResponse
|
|
|
|
require.NoError(t, srv.RPC("Keyring.Rotate", rotateReq, &rotateResp))
|
|
|
|
|
2022-08-15 15:19:53 +00:00
|
|
|
for i := 0; i < 3; i++ {
|
2022-08-26 18:03:56 +00:00
|
|
|
req := &structs.VariablesApplyRequest{
|
|
|
|
Op: structs.VarOpSet,
|
|
|
|
Var: mock.Variable(),
|
2022-08-15 15:19:53 +00:00
|
|
|
WriteRequest: structs.WriteRequest{Region: srv.config.Region},
|
|
|
|
}
|
2022-08-26 18:03:56 +00:00
|
|
|
resp := &structs.VariablesApplyResponse{}
|
|
|
|
require.NoError(t, srv.RPC("Variables.Apply", req, resp))
|
2022-07-07 17:48:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
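// A full rotation should rekey every existing variable under the new key,
// which is what the Eventually block below waits for.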
rotateReq.Full = true
|
|
|
|
require.NoError(t, srv.RPC("Keyring.Rotate", rotateReq, &rotateResp))
|
|
|
|
newKeyID := rotateResp.Key.KeyID
|
|
|
|
|
|
|
|
require.Eventually(t, func() bool {
|
|
|
|
ws := memdb.NewWatchSet()
|
2022-08-26 18:03:56 +00:00
|
|
|
iter, err := store.Variables(ws)
|
2022-07-07 17:48:38 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
for {
|
|
|
|
raw := iter.Next()
|
|
|
|
if raw == nil {
|
|
|
|
break
|
|
|
|
}
|
2022-08-26 18:03:56 +00:00
|
|
|
variable := raw.(*structs.VariableEncrypted)
|
2022-07-07 17:48:38 +00:00
|
|
|
if variable.KeyID != newKeyID {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
}, time.Second*5, 100*time.Millisecond,
|
2022-08-26 18:03:56 +00:00
|
|
|
"variable rekey should be complete")
|
2022-07-07 17:48:38 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-08-18 20:48:43 +00:00
|
|
|
func TestCoreScheduler_FailLoop(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2020-08-18 20:48:43 +00:00
|
|
|
|
|
|
|
srv, cleanupSrv := TestServer(t, func(c *Config) {
|
|
|
|
c.NumSchedulers = 0 // Prevent automatic dequeue
|
|
|
|
c.EvalDeliveryLimit = 2
|
|
|
|
c.EvalFailedFollowupBaselineDelay = 50 * time.Millisecond
c.EvalFailedFollowupDelayRange = 1 * time.Millisecond
|
|
|
|
})
|
|
|
|
defer cleanupSrv()
|
|
|
|
codec := rpcClient(t, srv)
|
|
|
|
sched := []string{structs.JobTypeCore}
|
|
|
|
|
|
|
|
testutil.WaitForResult(func() (bool, error) {
|
|
|
|
return srv.evalBroker.Enabled(), nil
|
|
|
|
}, func(err error) {
|
|
|
|
t.Fatalf("should enable eval broker")
|
|
|
|
})
|
|
|
|
|
|
|
|
// Enqueue a core job eval that can never succeed because it was enqueued
|
|
|
|
// by another leader that's now gone
|
|
|
|
expected := srv.coreJobEval(structs.CoreJobCSIPluginGC, 100)
|
|
|
|
expected.LeaderACL = "nonsense"
|
|
|
|
srv.evalBroker.Enqueue(expected)
|
|
|
|
|
|
|
|
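// nack sends an Eval.Nack RPC for the dequeued eval so the broker records a
// failed delivery and, while under EvalDeliveryLimit, re-enqueues it.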
nack := func(evalID, token string) error {
|
|
|
|
req := &structs.EvalAckRequest{
|
|
|
|
EvalID: evalID,
|
|
|
|
Token: token,
|
|
|
|
WriteRequest: structs.WriteRequest{Region: "global"},
|
|
|
|
}
|
|
|
|
var resp structs.GenericResponse
|
|
|
|
return msgpackrpc.CallWithCodec(codec, "Eval.Nack", req, &resp)
|
|
|
|
}
|
|
|
|
|
|
|
|
out, token, err := srv.evalBroker.Dequeue(sched, time.Second*5)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, out)
|
|
|
|
require.Equal(t, expected, out)
|
2020-08-18 20:48:43 +00:00
|
|
|
|
|
|
|
// first fail
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, nack(out.ID, token))
|
2020-08-18 20:48:43 +00:00
|
|
|
|
|
|
|
out, token, err = srv.evalBroker.Dequeue(sched, time.Second*5)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, out)
|
|
|
|
require.Equal(t, expected, out)
|
2020-08-18 20:48:43 +00:00
|
|
|
|
|
|
|
// second fail, should not result in failed-follow-up
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, nack(out.ID, token))
|
2020-08-18 20:48:43 +00:00
|
|
|
|
|
|
|
out, token, err = srv.evalBroker.Dequeue(sched, time.Second*5)
|
2022-04-01 19:17:58 +00:00
|
|
|
require.NoError(t, err)
|
2020-08-18 20:48:43 +00:00
|
|
|
if out != nil {
|
|
|
|
t.Fatalf(
|
|
|
|
"failed core jobs should not result in follow-up. TriggeredBy: %v",
|
|
|
|
out.TriggeredBy)
|
|
|
|
}
|
|
|
|
}
|
2022-07-19 13:37:46 +00:00
|
|
|
|
|
|
|
func TestCoreScheduler_ExpiredACLTokenGC(t *testing.T) {
	ci.Parallel(t)

	testServer, rootACLToken, testServerShutdown := TestACLServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer testServerShutdown()
	testutil.WaitForLeader(t, testServer.RPC)

	now := time.Now().UTC()

	// Craft some specific local and global tokens. For each type, one is
	// expired, one is not.
	expiredGlobal := mock.ACLToken()
	expiredGlobal.Global = true
	expiredGlobal.ExpirationTime = pointer.Of(now.Add(-2 * time.Hour))

	unexpiredGlobal := mock.ACLToken()
	unexpiredGlobal.Global = true
	unexpiredGlobal.ExpirationTime = pointer.Of(now.Add(2 * time.Hour))

	expiredLocal := mock.ACLToken()
	expiredLocal.ExpirationTime = pointer.Of(now.Add(-2 * time.Hour))

	unexpiredLocal := mock.ACLToken()
	unexpiredLocal.ExpirationTime = pointer.Of(now.Add(2 * time.Hour))

	// Upsert these into state.
	err := testServer.State().UpsertACLTokens(structs.MsgTypeTestSetup, 10, []*structs.ACLToken{
		expiredGlobal, unexpiredGlobal, expiredLocal, unexpiredLocal,
	})
	require.NoError(t, err)

	// Overwrite the timetable. The existing timetable has an entry due to the
	// ACL bootstrapping which makes witnessing a new index at a timestamp in
	// the past impossible.
	tt := NewTimeTable(timeTableGranularity, timeTableLimit)
	tt.Witness(20, time.Now().UTC().Add(-1*testServer.config.ACLTokenExpirationGCThreshold))
	testServer.fsm.timetable = tt

	// Generate the core scheduler.
	snap, err := testServer.State().Snapshot()
	require.NoError(t, err)
	coreScheduler := NewCoreScheduler(testServer, snap)

	// Trigger global and local periodic garbage collection runs.
	index, err := testServer.State().LatestIndex()
	require.NoError(t, err)
	index++
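
	// Expired global and local tokens are collected by separate core jobs, so
	// process one eval for each.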
	globalGCEval := testServer.coreJobEval(structs.CoreJobGlobalTokenExpiredGC, index)
	require.NoError(t, coreScheduler.Process(globalGCEval))

	localGCEval := testServer.coreJobEval(structs.CoreJobLocalTokenExpiredGC, index)
	require.NoError(t, coreScheduler.Process(localGCEval))

	// Ensure the ACL tokens stored within state are as expected.
	iter, err := testServer.State().ACLTokens(nil, state.SortDefault)
	require.NoError(t, err)

	var tokens []*structs.ACLToken
	for raw := iter.Next(); raw != nil; raw = iter.Next() {
		tokens = append(tokens, raw.(*structs.ACLToken))
	}
	require.ElementsMatch(t, []*structs.ACLToken{rootACLToken, unexpiredGlobal, unexpiredLocal}, tokens)
}
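
// TestCoreScheduler_ExpiredACLTokenGC_Force tests that a forced garbage
// collection removes every expired local and global ACL token while leaving
// the unexpired tokens and the root token in state.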
func TestCoreScheduler_ExpiredACLTokenGC_Force(t *testing.T) {
	ci.Parallel(t)

	testServer, rootACLToken, testServerShutdown := TestACLServer(t, func(c *Config) {
		c.NumSchedulers = 0
	})
	defer testServerShutdown()
	testutil.WaitForLeader(t, testServer.RPC)

	// This time is the threshold for all expiry calls to be based on. All
	// tokens with expiry can use this as their base and use Add().
	expiryTimeThreshold := time.Now().UTC()

	// Track expired and non-expired tokens for local and global tokens in
	// separate arrays, so we have a clear way to test state.
	var expiredGlobalTokens, nonExpiredGlobalTokens, expiredLocalTokens, nonExpiredLocalTokens []*structs.ACLToken

	// Add the root ACL token to the appropriate array. This will be returned
	// from state so must be accounted for and tested.
	nonExpiredGlobalTokens = append(nonExpiredGlobalTokens, rootACLToken)

	// Generate a mix of expired and non-expired global tokens; these are
	// upserted further below.
	for i := 0; i < 20; i++ {
		mockedToken := mock.ACLToken()
		mockedToken.Global = true
		if i%2 == 0 {
			expiredGlobalTokens = append(expiredGlobalTokens, mockedToken)
			mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour))
		} else {
			nonExpiredGlobalTokens = append(nonExpiredGlobalTokens, mockedToken)
			mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(24 * time.Hour))
		}
	}

	// Generate a mix of expired and non-expired local tokens; these are
	// upserted further below.
	for i := 0; i < 20; i++ {
		mockedToken := mock.ACLToken()
		mockedToken.Global = false
		if i%2 == 0 {
			expiredLocalTokens = append(expiredLocalTokens, mockedToken)
			mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(-24 * time.Hour))
		} else {
			nonExpiredLocalTokens = append(nonExpiredLocalTokens, mockedToken)
			mockedToken.ExpirationTime = pointer.Of(expiryTimeThreshold.Add(24 * time.Hour))
		}
	}
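
	// Combine all four groups so they can be written to state in a single
	// upsert.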
	allTokens := append(expiredGlobalTokens, nonExpiredGlobalTokens...)
	allTokens = append(allTokens, expiredLocalTokens...)
	allTokens = append(allTokens, nonExpiredLocalTokens...)

	// Upsert them all.
	err := testServer.State().UpsertACLTokens(structs.MsgTypeTestSetup, 10, allTokens)
	require.NoError(t, err)

	// This function provides an easy way to get all tokens out of the
	// iterator.
	fromIteratorFunc := func(iter memdb.ResultIterator) []*structs.ACLToken {
		var tokens []*structs.ACLToken
		for raw := iter.Next(); raw != nil; raw = iter.Next() {
			tokens = append(tokens, raw.(*structs.ACLToken))
		}
		return tokens
	}

	// Check all the tokens are correctly stored within state.
	iter, err := testServer.State().ACLTokens(nil, state.SortDefault)
	require.NoError(t, err)

	tokens := fromIteratorFunc(iter)
	require.ElementsMatch(t, allTokens, tokens)

	// Generate the core scheduler and trigger a forced garbage collection
	// which should delete all expired tokens.
	snap, err := testServer.State().Snapshot()
	require.NoError(t, err)
	coreScheduler := NewCoreScheduler(testServer, snap)

	index, err := testServer.State().LatestIndex()
	require.NoError(t, err)
	index++
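
	// Unlike the periodic expired-token GC test, a forced GC run should
	// collect the expired tokens without the timetable manipulation that test
	// requires.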
	forceGCEval := testServer.coreJobEval(structs.CoreJobForceGC, index)
	require.NoError(t, coreScheduler.Process(forceGCEval))

	// List all the remaining ACL tokens to be sure they are as expected.
	iter, err = testServer.State().ACLTokens(nil, state.SortDefault)
	require.NoError(t, err)

	tokens = fromIteratorFunc(iter)
	require.ElementsMatch(t, append(nonExpiredGlobalTokens, nonExpiredLocalTokens...), tokens)
}