core: allow setting and propagation of eval priority on job de/registration (#11532)
This change modifies the Nomad job register and deregister RPCs to accept an updated option set which includes eval priority. This parameter is optional and overrides the use of the job priority to set the eval priority. To ensure all evaluations resulting from the request use the same eval priority, the priority is propagated to the allocReconciler and deploymentWatcher. This creates a new distinction between eval priority and job priority. The Nomad agent HTTP API has been modified to allow setting the eval priority on job update and delete. To keep consistency with the current v1 API, job update accepts this as a payload param; job delete accepts this as a query param. Any user-supplied value is validated within the agent HTTP handler, removing the need to pass invalid requests to the server. The register and deregister opts functions now allow setting the eval priority on requests. The change also includes a small fix to the DeregisterOpts function so that it handles nil opts, bringing it in line with RegisterOpts.
This commit is contained in: parent d0f27f573f · commit 751c8217d1
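For illustration, a minimal sketch of driving the new options from the Go API client. This assumes a reachable local dev agent; `api.NewServiceJob` is an existing helper in the `api` package, and the priority values here are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	job := api.NewServiceJob("example", "example", "global", 50)

	// Register with an eval priority override; the scheduler dequeues this
	// evaluation ahead of lower-priority work in the broker.
	resp, _, err := client.Jobs().RegisterOpts(job, &api.RegisterOptions{EvalPriority: 90}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("register eval:", resp.EvalID)

	// Deregister with a different override; Purge removes the job entirely.
	evalID, _, err := client.Jobs().DeregisterOpts(*job.ID, &api.DeregisterOptions{EvalPriority: 95, Purge: true}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deregister eval:", evalID)
}
```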
.changelog/11532.txt (new file, path inferred), @@ -0,0 +1,3 @@:

```release-note:improvement
core: allow setting and propagation of eval priority on job de/registration
```
api/jobs.go (35 changes)

```diff
@@ -91,6 +91,7 @@ type RegisterOptions struct {
 	ModifyIndex    uint64
 	PolicyOverride bool
 	PreserveCounts bool
+	EvalPriority   int
 }
 
 // Register is used to register a new job. It returns the ID
@@ -105,8 +106,8 @@ func (j *Jobs) EnforceRegister(job *Job, modifyIndex uint64, q *WriteOptions) (*
 	return j.RegisterOpts(job, &opts, q)
 }
 
-// Register is used to register a new job. It returns the ID
-// of the evaluation, along with any errors encountered.
+// RegisterOpts is used to register a new job with the passed RegisterOpts. It
+// returns the ID of the evaluation, along with any errors encountered.
 func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*JobRegisterResponse, *WriteMeta, error) {
 	// Format the request
 	req := &JobRegisterRequest{
@@ -119,6 +120,7 @@ func (j *Jobs) RegisterOpts(job *Job, opts *RegisterOptions, q *WriteOptions) (*
 		}
 		req.PolicyOverride = opts.PolicyOverride
 		req.PreserveCounts = opts.PreserveCounts
+		req.EvalPriority = opts.EvalPriority
 	}
 
 	var resp JobRegisterResponse
@@ -290,14 +292,31 @@ type DeregisterOptions struct {
 	// If Global is set to true, all regions of a multiregion job will be
 	// stopped.
 	Global bool
+
+	// EvalPriority is an optional priority to use on any evaluation created as
+	// a result of this job deregistration. This value must be between 1-100
+	// inclusively, where a larger value corresponds to a higher priority. This
+	// is useful when an operator wishes to push through a job deregistration
+	// in busy clusters with a large evaluation backlog.
+	EvalPriority int
 }
 
 // DeregisterOpts is used to remove an existing job. See DeregisterOptions
 // for parameters.
 func (j *Jobs) DeregisterOpts(jobID string, opts *DeregisterOptions, q *WriteOptions) (string, *WriteMeta, error) {
 	var resp JobDeregisterResponse
-	wm, err := j.client.delete(fmt.Sprintf("/v1/job/%v?purge=%t&global=%t",
-		url.PathEscape(jobID), opts.Purge, opts.Global), &resp, q)
+
+	// The base endpoint to add query params to.
+	endpoint := "/v1/job/" + url.PathEscape(jobID)
+
+	// Protect against nil opts. url.Values expects a string, and so using
+	// fmt.Sprintf is the best way to do this.
+	if opts != nil {
+		endpoint += fmt.Sprintf("?purge=%t&global=%t&eval_priority=%v",
+			opts.Purge, opts.Global, opts.EvalPriority)
+	}
+
+	wm, err := j.client.delete(endpoint, &resp, q)
 	if err != nil {
 		return "", nil, err
 	}
@@ -1170,6 +1189,14 @@ type JobRegisterRequest struct {
 	PolicyOverride bool `json:",omitempty"`
 	PreserveCounts bool `json:",omitempty"`
 
+	// EvalPriority is an optional priority to use on any evaluation created as
+	// a result of this job registration. This value must be between 1-100
+	// inclusively, where a larger value corresponds to a higher priority. This
+	// is useful when an operator wishes to push through a job registration in
+	// busy clusters with a large evaluation backlog. This avoids needing to
+	// change the job priority which also impacts preemption.
+	EvalPriority int `json:",omitempty"`
+
 	WriteRequest
 }
```
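To make the nil-opts behaviour concrete, here is a standalone sketch of the same endpoint construction. The struct is a local copy for illustration only, not the `api` package type:

```go
package main

import (
	"fmt"
	"net/url"
)

// DeregisterOptions mirrors the api package struct for illustration only.
type DeregisterOptions struct {
	Purge        bool
	Global       bool
	EvalPriority int
}

// endpoint reproduces the construction in DeregisterOpts above: nil options
// yield a bare DELETE path, otherwise all three params are always encoded.
func endpoint(jobID string, opts *DeregisterOptions) string {
	e := "/v1/job/" + url.PathEscape(jobID)
	if opts != nil {
		e += fmt.Sprintf("?purge=%t&global=%t&eval_priority=%v",
			opts.Purge, opts.Global, opts.EvalPriority)
	}
	return e
}

func main() {
	fmt.Println(endpoint("example", nil))
	// -> /v1/job/example
	fmt.Println(endpoint("example", &DeregisterOptions{Purge: true, EvalPriority: 90}))
	// -> /v1/job/example?purge=true&global=false&eval_priority=90
}
```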
api/jobs_test.go (116 changes)

```diff
@@ -187,6 +187,60 @@ func TestJobs_Register_NoPreserveCounts(t *testing.T) {
 	require.Equal(3, status.TaskGroups["group3"].Desired) // new => as specified
 }
 
+func TestJobs_Register_EvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	c, s := makeClient(t, nil, nil)
+	defer s.Stop()
+
+	// Listing jobs before registering returns nothing
+	listResp, _, err := c.Jobs().List(nil)
+	requireAssert.Nil(err)
+	requireAssert.Len(listResp, 0)
+
+	// Create a job and register it with an eval priority.
+	job := testJob()
+	registerResp, wm, err := c.Jobs().RegisterOpts(job, &RegisterOptions{EvalPriority: 99}, nil)
+	requireAssert.Nil(err)
+	requireAssert.NotNil(registerResp)
+	requireAssert.NotEmpty(registerResp.EvalID)
+	assertWriteMeta(t, wm)
+
+	// Check the created job evaluation has a priority that matches our desired
+	// value.
+	evalInfo, _, err := c.Evaluations().Info(registerResp.EvalID, nil)
+	requireAssert.NoError(err)
+	requireAssert.Equal(99, evalInfo.Priority)
+}
+
+func TestJobs_Register_NoEvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	c, s := makeClient(t, nil, nil)
+	defer s.Stop()
+
+	// Listing jobs before registering returns nothing
+	listResp, _, err := c.Jobs().List(nil)
+	requireAssert.Nil(err)
+	requireAssert.Len(listResp, 0)
+
+	// Create a job and register it without an eval priority.
+	job := testJob()
+	registerResp, wm, err := c.Jobs().RegisterOpts(job, nil, nil)
+	requireAssert.Nil(err)
+	requireAssert.NotNil(registerResp)
+	requireAssert.NotEmpty(registerResp.EvalID)
+	assertWriteMeta(t, wm)
+
+	// Check the created job evaluation has a priority that matches the job
+	// priority.
+	evalInfo, _, err := c.Evaluations().Info(registerResp.EvalID, nil)
+	requireAssert.NoError(err)
+	requireAssert.Equal(*job.Priority, evalInfo.Priority)
+}
+
 func TestJobs_Validate(t *testing.T) {
 	t.Parallel()
 	c, s := makeClient(t, nil, nil)
@@ -1628,6 +1682,68 @@ func TestJobs_Deregister(t *testing.T) {
 	}
 }
 
+func TestJobs_Deregister_EvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	c, s := makeClient(t, nil, nil)
+	defer s.Stop()
+
+	// Listing jobs before registering returns nothing
+	listResp, _, err := c.Jobs().List(nil)
+	requireAssert.Nil(err)
+	requireAssert.Len(listResp, 0)
+
+	// Create a job and register it.
+	job := testJob()
+	registerResp, wm, err := c.Jobs().Register(job, nil)
+	requireAssert.Nil(err)
+	requireAssert.NotNil(registerResp)
+	requireAssert.NotEmpty(registerResp.EvalID)
+	assertWriteMeta(t, wm)
+
+	// Deregister the job with an eval priority.
+	evalID, _, err := c.Jobs().DeregisterOpts(*job.ID, &DeregisterOptions{EvalPriority: 97}, nil)
+	requireAssert.NoError(err)
+	requireAssert.NotEmpty(evalID)
+
+	// Lookup the eval and check the priority on it.
+	evalInfo, _, err := c.Evaluations().Info(evalID, nil)
+	requireAssert.NoError(err)
+	requireAssert.Equal(97, evalInfo.Priority)
+}
+
+func TestJobs_Deregister_NoEvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	c, s := makeClient(t, nil, nil)
+	defer s.Stop()
+
+	// Listing jobs before registering returns nothing
+	listResp, _, err := c.Jobs().List(nil)
+	requireAssert.Nil(err)
+	requireAssert.Len(listResp, 0)
+
+	// Create a job and register it.
+	job := testJob()
+	registerResp, wm, err := c.Jobs().Register(job, nil)
+	requireAssert.Nil(err)
+	requireAssert.NotNil(registerResp)
+	requireAssert.NotEmpty(registerResp.EvalID)
+	assertWriteMeta(t, wm)
+
+	// Deregister the job without an eval priority.
+	evalID, _, err := c.Jobs().DeregisterOpts(*job.ID, &DeregisterOptions{}, nil)
+	requireAssert.NoError(err)
+	requireAssert.NotEmpty(evalID)
+
+	// Lookup the eval and check the priority on it.
+	evalInfo, _, err := c.Evaluations().Info(evalID, nil)
+	requireAssert.NoError(err)
+	requireAssert.Equal(*job.Priority, evalInfo.Priority)
+}
+
 func TestJobs_ForceEvaluate(t *testing.T) {
 	t.Parallel()
 	c, s := makeClient(t, nil, nil)
```
command/agent/http.go (path inferred)

```diff
@@ -687,6 +687,19 @@ func parseBool(req *http.Request, field string) (*bool, error) {
 	return nil, nil
 }
 
+// parseInt parses a query parameter to an int or returns (nil, nil) if the
+// parameter is not present.
+func parseInt(req *http.Request, field string) (*int, error) {
+	if str := req.URL.Query().Get(field); str != "" {
+		param, err := strconv.Atoi(str)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to parse value of %q (%v) as an int: %v", field, str, err)
+		}
+		return &param, nil
+	}
+	return nil, nil
+}
+
 // parseToken is used to parse the X-Nomad-Token param
 func (s *HTTPServer) parseToken(req *http.Request, token *string) {
 	if other := req.Header.Get("X-Nomad-Token"); other != "" {
```
command/agent/http_test.go (path inferred)

```diff
@@ -574,6 +574,53 @@ func TestParseBool(t *testing.T) {
 	}
 }
 
+func Test_parseInt(t *testing.T) {
+	t.Parallel()
+
+	cases := []struct {
+		Input    string
+		Expected *int
+		Err      bool
+	}{
+		{
+			Input:    "",
+			Expected: nil,
+		},
+		{
+			Input:    "13",
+			Expected: helper.IntToPtr(13),
+		},
+		{
+			Input:    "99",
+			Expected: helper.IntToPtr(99),
+		},
+		{
+			Input: "ten",
+			Err:   true,
+		},
+	}
+
+	for i := range cases {
+		tc := cases[i]
+		t.Run("Input-"+tc.Input, func(t *testing.T) {
+			testURL, err := url.Parse("http://localhost/foo?eval_priority=" + tc.Input)
+			require.NoError(t, err)
+			req := &http.Request{
+				URL: testURL,
+			}
+
+			result, err := parseInt(req, "eval_priority")
+			if tc.Err {
+				require.Error(t, err)
+				require.Nil(t, result)
+			} else {
+				require.NoError(t, err)
+				require.Equal(t, tc.Expected, result)
+			}
+		})
+	}
+}
+
 func TestParsePagination(t *testing.T) {
 	t.Parallel()
 	s := makeHTTPServer(t, nil)
```
command/agent/job_endpoint.go (path inferred)

```diff
@@ -390,6 +390,15 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
 		}
 	}
 
+	// Validate the evaluation priority if the user supplied a non-default
+	// value. It's more efficient to do it here, within the agent rather than
+	// sending a bad request for the server to reject.
+	if args.EvalPriority != 0 {
+		if err := validateEvalPriorityOpt(args.EvalPriority); err != nil {
+			return nil, err
+		}
+	}
+
 	sJob, writeReq := s.apiJobAndRequestToStructs(args.Job, req, args.WriteRequest)
 	regReq := structs.JobRegisterRequest{
 		Job:            sJob,
@@ -397,6 +406,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
 		JobModifyIndex: args.JobModifyIndex,
 		PolicyOverride: args.PolicyOverride,
 		PreserveCounts: args.PreserveCounts,
+		EvalPriority:   args.EvalPriority,
 		WriteRequest:   *writeReq,
 	}
 
@@ -411,6 +421,9 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
 func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
 	jobName string) (interface{}, error) {
 
+	args := structs.JobDeregisterRequest{JobID: jobName}
+
+	// Identify the purge query param and parse.
 	purgeStr := req.URL.Query().Get("purge")
 	var purgeBool bool
 	if purgeStr != "" {
@@ -420,7 +433,9 @@ func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
 			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "purge", purgeStr, err)
 		}
 	}
+	args.Purge = purgeBool
 
+	// Identify the global query param and parse.
 	globalStr := req.URL.Query().Get("global")
 	var globalBool bool
 	if globalStr != "" {
@@ -430,12 +445,24 @@ func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
 			return nil, fmt.Errorf("Failed to parse value of %q (%v) as a bool: %v", "global", globalStr, err)
 		}
 	}
+	args.Global = globalBool
 
-	args := structs.JobDeregisterRequest{
-		JobID:  jobName,
-		Purge:  purgeBool,
-		Global: globalBool,
-	}
+	// Parse the eval priority from the request URL query if present.
+	evalPriority, err := parseInt(req, "eval_priority")
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate the evaluation priority if the user supplied a non-default
+	// value. It's more efficient to do it here, within the agent rather than
+	// sending a bad request for the server to reject.
+	if evalPriority != nil && *evalPriority > 0 {
+		if err := validateEvalPriorityOpt(*evalPriority); err != nil {
+			return nil, err
+		}
+		args.EvalPriority = *evalPriority
+	}
+
 	s.parseWriteRequest(req, &args.WriteRequest)
 
 	var out structs.JobDeregisterResponse
@@ -1661,3 +1688,12 @@ func ApiSpreadToStructs(a1 *api.Spread) *structs.Spread {
 	}
 	return ret
 }
+
+// validateEvalPriorityOpt ensures the supplied evaluation priority override
+// value is within acceptable bounds.
+func validateEvalPriorityOpt(priority int) HTTPCodedError {
+	if priority < 1 || priority > 100 {
+		return CodedError(400, "Eval priority must be between 1 and 100 inclusively")
+	}
+	return nil
+}
```
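In HTTP terms, the handlers above mean the override travels in the JSON payload on update and as a query parameter on delete. A sketch against a default local agent (job name and payload abbreviated; the `EvalPriority` field name comes from the api struct's JSON tag):

```console
$ curl -XPUT -d '{"Job": {...}, "EvalPriority": 90}' http://localhost:4646/v1/job/example
$ curl -XDELETE 'http://localhost:4646/v1/job/example?purge=true&eval_priority=90'
```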
command/agent/job_endpoint_test.go (path inferred)

```diff
@@ -4,6 +4,7 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"reflect"
+	"strconv"
 	"testing"
 	"time"
@@ -596,6 +597,101 @@ func TestHTTP_JobUpdate(t *testing.T) {
 	})
 }
 
+func TestHTTP_JobUpdate_EvalPriority(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		inputEvalPriority int
+		expectedError     bool
+		name              string
+	}{
+		{
+			inputEvalPriority: 95,
+			expectedError:     false,
+			name:              "valid input eval priority",
+		},
+		{
+			inputEvalPriority: 99999999999,
+			expectedError:     true,
+			name:              "invalid input eval priority",
+		},
+		{
+			inputEvalPriority: 0,
+			expectedError:     false,
+			name:              "no input eval priority",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			httpTest(t, nil, func(s *TestAgent) {
+				// Create the job
+				job := MockJob()
+				args := api.JobRegisterRequest{
+					Job: job,
+					WriteRequest: api.WriteRequest{
+						Region:    "global",
+						Namespace: api.DefaultNamespace,
+					},
+				}
+
+				// Add our eval priority to the request payload if set.
+				if tc.inputEvalPriority > 0 {
+					args.EvalPriority = tc.inputEvalPriority
+				}
+				buf := encodeReq(args)
+
+				// Make the HTTP request
+				req, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf)
+				assert.Nil(t, err)
+				respW := httptest.NewRecorder()
+
+				// Make the request
+				obj, err := s.Server.JobSpecificRequest(respW, req)
+				if tc.expectedError {
+					assert.NotNil(t, err)
+					return
+				} else {
+					assert.Nil(t, err)
+				}
+
+				// Check the response
+				regResp := obj.(structs.JobRegisterResponse)
+				assert.NotEmpty(t, regResp.EvalID)
+				assert.NotEmpty(t, respW.Result().Header.Get("X-Nomad-Index"))
+
+				// Check the job is registered
+				getReq := structs.JobSpecificRequest{
+					JobID: *job.ID,
+					QueryOptions: structs.QueryOptions{
+						Region:    "global",
+						Namespace: structs.DefaultNamespace,
+					},
+				}
+				var getResp structs.SingleJobResponse
+				assert.Nil(t, s.Agent.RPC("Job.GetJob", &getReq, &getResp))
+				assert.NotNil(t, getResp.Job)
+
+				// Check the evaluation that resulted from the job register.
+				evalInfoReq, err := http.NewRequest("GET", "/v1/evaluation/"+regResp.EvalID, nil)
+				assert.Nil(t, err)
+				respW.Flush()
+
+				evalRaw, err := s.Server.EvalSpecificRequest(respW, evalInfoReq)
+				assert.Nil(t, err)
+				evalRespObj := evalRaw.(*structs.Evaluation)
+
+				if tc.inputEvalPriority > 0 {
+					assert.Equal(t, tc.inputEvalPriority, evalRespObj.Priority)
+				} else {
+					assert.Equal(t, *job.Priority, evalRespObj.Priority)
+				}
+			})
+		})
+	}
+}
+
 func TestHTTP_JobUpdateRegion(t *testing.T) {
 	t.Parallel()
@@ -797,6 +893,117 @@ func TestHTTP_JobDelete(t *testing.T) {
 	})
 }
 
+func TestHTTP_JobDelete_EvalPriority(t *testing.T) {
+	t.Parallel()
+
+	testCases := []struct {
+		inputEvalPriority int
+		expectedError     bool
+		name              string
+	}{
+		{
+			inputEvalPriority: 95,
+			expectedError:     false,
+			name:              "valid input eval priority",
+		},
+		{
+			inputEvalPriority: 99999999999,
+			expectedError:     true,
+			name:              "invalid input eval priority",
+		},
+		{
+			inputEvalPriority: 0,
+			expectedError:     false,
+			name:              "no input eval priority",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+
+			httpTest(t, nil, func(s *TestAgent) {
+				// Create the job
+				job := MockJob()
+				args := api.JobRegisterRequest{
+					Job: job,
+					WriteRequest: api.WriteRequest{
+						Region:    "global",
+						Namespace: api.DefaultNamespace,
+					},
+				}
+				buf := encodeReq(args)
+
+				// Make the HTTP request
+				regReq, err := http.NewRequest("PUT", "/v1/job/"+*job.ID, buf)
+				assert.Nil(t, err)
+				respW := httptest.NewRecorder()
+
+				// Make the request
+				obj, err := s.Server.JobSpecificRequest(respW, regReq)
+				assert.Nil(t, err)
+
+				// Check the response
+				regResp := obj.(structs.JobRegisterResponse)
+				assert.NotEmpty(t, regResp.EvalID)
+				assert.NotEmpty(t, respW.Result().Header.Get("X-Nomad-Index"))
+
+				// Check the job is registered
+				getReq := structs.JobSpecificRequest{
+					JobID: *job.ID,
+					QueryOptions: structs.QueryOptions{
+						Region:    "global",
+						Namespace: structs.DefaultNamespace,
+					},
+				}
+				var getResp structs.SingleJobResponse
+				assert.Nil(t, s.Agent.RPC("Job.GetJob", &getReq, &getResp))
+				assert.NotNil(t, getResp.Job)
+
+				// Delete the job.
+				deleteReq, err := http.NewRequest("DELETE", "/v1/job/"+*job.ID+"?purge=true", nil)
+				assert.Nil(t, err)
+				respW.Flush()
+
+				// Add our eval priority query param if set.
+				if tc.inputEvalPriority > 0 {
+					q := deleteReq.URL.Query()
+					q.Add("eval_priority", strconv.Itoa(tc.inputEvalPriority))
+					deleteReq.URL.RawQuery = q.Encode()
+				}
+
+				// Make the request
+				obj, err = s.Server.JobSpecificRequest(respW, deleteReq)
+				if tc.expectedError {
+					assert.NotNil(t, err)
+					return
+				} else {
+					assert.Nil(t, err)
+				}
+
+				// Check the response
+				dereg := obj.(structs.JobDeregisterResponse)
+				assert.NotEmpty(t, dereg.EvalID)
+				assert.NotEmpty(t, respW.Result().Header.Get("X-Nomad-Index"))
+
+				// Check the evaluation that resulted from the job deregister.
+				evalInfoReq, err := http.NewRequest("GET", "/v1/evaluation/"+dereg.EvalID, nil)
+				assert.Nil(t, err)
+				respW.Flush()
+
+				evalRaw, err := s.Server.EvalSpecificRequest(respW, evalInfoReq)
+				assert.Nil(t, err)
+				evalRespObj := evalRaw.(*structs.Evaluation)
+
+				if tc.inputEvalPriority > 0 {
+					assert.Equal(t, tc.inputEvalPriority, evalRespObj.Priority)
+				} else {
+					assert.Equal(t, *job.Priority, evalRespObj.Priority)
+				}
+			})
+		})
+	}
+}
+
 func TestHTTP_Job_ScaleTaskGroup(t *testing.T) {
 	t.Parallel()
```
command/job_run.go (path inferred)

```diff
@@ -86,6 +86,10 @@ Run Options:
     the evaluation ID will be printed to the screen, which can be used to
     examine the evaluation using the eval-status command.
 
+  -eval-priority
+    Override the priority of the evaluations produced as a result of this job
+    submission. By default, this is set to the priority of the job.
+
   -hcl1
     Parses the job file as HCLv1.
 
@@ -152,6 +156,7 @@ func (c *JobRunCommand) AutocompleteFlags() complete.Flags {
 			"-hcl2-strict":   complete.PredictNothing,
 			"-var":           complete.PredictAnything,
 			"-var-file":      complete.PredictFiles("*.var"),
+			"-eval-priority": complete.PredictNothing,
 		})
 }
 
@@ -165,6 +170,7 @@ func (c *JobRunCommand) Run(args []string) int {
 	var detach, verbose, output, override, preserveCounts, hcl2Strict bool
 	var checkIndexStr, consulToken, consulNamespace, vaultToken, vaultNamespace string
 	var varArgs, varFiles flaghelper.StringFlag
+	var evalPriority int
 
 	flagSet := c.Meta.FlagSet(c.Name(), FlagSetClient)
 	flagSet.Usage = func() { c.Ui.Output(c.Help()) }
@@ -182,6 +188,7 @@ func (c *JobRunCommand) Run(args []string) int {
 	flagSet.StringVar(&vaultNamespace, "vault-namespace", "", "")
 	flagSet.Var(&varArgs, "var", "")
 	flagSet.Var(&varFiles, "var-file", "")
+	flagSet.IntVar(&evalPriority, "eval-priority", 0, "")
 
 	if err := flagSet.Parse(args); err != nil {
 		return 1
@@ -282,13 +289,15 @@ func (c *JobRunCommand) Run(args []string) int {
 	}
 
 	// Set the register options
-	opts := &api.RegisterOptions{}
+	opts := &api.RegisterOptions{
+		PolicyOverride: override,
+		PreserveCounts: preserveCounts,
+		EvalPriority:   evalPriority,
+	}
 	if enforce {
 		opts.EnforceIndex = true
 		opts.ModifyIndex = checkIndex
 	}
-	opts.PolicyOverride = override
-	opts.PreserveCounts = preserveCounts
 
 	// Submit the job
 	resp, _, err := client.Jobs().RegisterOpts(job, opts, nil)
```
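As a usage illustration (the job file name is assumed), the new flag rides alongside the existing run options:

```console
$ nomad job run -eval-priority=90 example.nomad
```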
command/job_stop.go (path inferred)

```diff
@@ -39,6 +39,10 @@ Stop Options:
     screen, which can be used to examine the evaluation using the eval-status
     command.
 
+  -eval-priority
+    Override the priority of the evaluations produced as a result of this job
+    deregistration. By default, this is set to the priority of the job.
+
   -purge
     Purge is used to stop the job and purge it from the system. If not set, the
     job will still be queryable and will be purged by the garbage collector.
@@ -64,6 +68,7 @@ func (c *JobStopCommand) AutocompleteFlags() complete.Flags {
 	return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
 		complete.Flags{
 			"-detach":        complete.PredictNothing,
+			"-eval-priority": complete.PredictNothing,
 			"-purge":         complete.PredictNothing,
 			"-global":        complete.PredictNothing,
 			"-yes":           complete.PredictNothing,
@@ -90,6 +95,7 @@ func (c *JobStopCommand) Name() string { return "job stop" }
 
 func (c *JobStopCommand) Run(args []string) int {
 	var detach, purge, verbose, global, autoYes bool
	+	var evalPriority int
 
 	flags := c.Meta.FlagSet(c.Name(), FlagSetClient)
 	flags.Usage = func() { c.Ui.Output(c.Help()) }
@@ -98,6 +104,7 @@ func (c *JobStopCommand) Run(args []string) int {
 	flags.BoolVar(&global, "global", false, "")
 	flags.BoolVar(&autoYes, "yes", false, "")
 	flags.BoolVar(&purge, "purge", false, "")
+	flags.IntVar(&evalPriority, "eval-priority", 0, "")
 
 	if err := flags.Parse(args); err != nil {
 		return 1
@@ -192,7 +199,7 @@ func (c *JobStopCommand) Run(args []string) int {
 	}
 
 	// Invoke the stop
-	opts := &api.DeregisterOptions{Purge: purge, Global: global}
+	opts := &api.DeregisterOptions{Purge: purge, Global: global, EvalPriority: evalPriority}
 	wq := &api.WriteOptions{Namespace: jobs[0].JobSummary.Namespace}
 	evalID, _, err := client.Jobs().DeregisterOpts(*job.ID, opts, wq)
 	if err != nil {
```
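For example:

```console
$ nomad job stop -eval-priority=90 -purge example
```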
e2e/e2e_test.go (path inferred)

```diff
@@ -10,12 +10,12 @@ import (
 	_ "github.com/hashicorp/nomad/e2e/affinities"
 	_ "github.com/hashicorp/nomad/e2e/clientstate"
 	_ "github.com/hashicorp/nomad/e2e/connect"
 	_ "github.com/hashicorp/nomad/e2e/consul"
 	_ "github.com/hashicorp/nomad/e2e/consultemplate"
 	_ "github.com/hashicorp/nomad/e2e/csi"
 	_ "github.com/hashicorp/nomad/e2e/deployment"
+	_ "github.com/hashicorp/nomad/e2e/eval_priority"
 	_ "github.com/hashicorp/nomad/e2e/events"
 	_ "github.com/hashicorp/nomad/e2e/example"
 	_ "github.com/hashicorp/nomad/e2e/isolation"
```
e2e/e2eutil/job.go (path inferred)

```diff
@@ -12,7 +12,24 @@ import (
 // Register registers a jobspec from a file but with a unique ID.
 // The caller is responsible for recording that ID for later cleanup.
 func Register(jobID, jobFilePath string) error {
-	cmd := exec.Command("nomad", "job", "run", "-detach", "-")
+	return register(jobID, jobFilePath, exec.Command("nomad", "job", "run", "-detach", "-"))
+}
+
+// RegisterWithArgs registers a jobspec from a file but with a unique ID. The
+// optional args are added to the run command. The caller is responsible for
+// recording that ID for later cleanup.
+func RegisterWithArgs(jobID, jobFilePath string, args ...string) error {
+
+	baseArgs := []string{"job", "run", "-detach"}
+	for i := range args {
+		baseArgs = append(baseArgs, args[i])
+	}
+	baseArgs = append(baseArgs, "-")
+
+	return register(jobID, jobFilePath, exec.Command("nomad", baseArgs...))
+}
+
+func register(jobID, jobFilePath string, cmd *exec.Cmd) error {
 	stdin, err := cmd.StdinPipe()
 	if err != nil {
 		return fmt.Errorf("could not open stdin?: %w", err)
```
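The new helper lets e2e cases pass arbitrary CLI flags through to `nomad job run`; the eval priority suite below uses it like this (fragment, with `jobID` assumed in scope):

```go
// Register a job file while overriding the eval priority via the CLI flag.
err := e2eutil.RegisterWithArgs(jobID, "eval_priority/inputs/thirteen_job_priority.nomad",
	"-eval-priority=80")
```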
e2e/eval_priority/eval_priority.go (new file, 189 lines; path inferred from the package name)

```go
package eval_priority

import (
	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/e2e/e2eutil"
	"github.com/hashicorp/nomad/e2e/framework"
	"github.com/hashicorp/nomad/helper/uuid"
)

type EvalPriorityTest struct {
	framework.TC
	jobIDs []string
}

func init() {
	framework.AddSuites(&framework.TestSuite{
		Component:   "EvalPriority",
		CanRunLocal: true,
		Cases: []framework.TestCase{
			new(EvalPriorityTest),
		},
	})
}

func (tc *EvalPriorityTest) BeforeAll(f *framework.F) {
	e2eutil.WaitForLeader(f.T(), tc.Nomad())
	e2eutil.WaitForNodesReady(f.T(), tc.Nomad(), 1)
}

func (tc *EvalPriorityTest) AfterEach(f *framework.F) {
	for _, id := range tc.jobIDs {
		_, _, err := tc.Nomad().Jobs().Deregister(id, true, nil)
		f.NoError(err)
	}
	tc.jobIDs = []string{}

	_, err := e2eutil.Command("nomad", "system", "gc")
	f.NoError(err)
}

// TestEvalPrioritySet performs a test which registers, updates, and
// deregisters a job, setting the eval priority on every call.
func (tc *EvalPriorityTest) TestEvalPrioritySet(f *framework.F) {

	// Generate a jobID and attempt to register the job using the eval
	// priority. In case there is a problem found here and the job registers,
	// we need to ensure it gets cleaned up.
	jobID := "test-eval-priority-" + uuid.Generate()[0:8]
	f.NoError(e2eutil.RegisterWithArgs(jobID, "eval_priority/inputs/thirteen_job_priority.nomad",
		"-eval-priority=80"))
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Wait for the deployment to finish.
	f.NoError(e2eutil.WaitForLastDeploymentStatus(jobID, "default", "successful", nil))

	// Pull the job evaluation list from the API and ensure that this didn't
	// error and contains two evals.
	//
	// Eval 1: the job registration eval.
	// Eval 2: the deployment watcher eval.
	registerEvals, _, err := tc.Nomad().Jobs().Evaluations(jobID, nil)
	f.NoError(err)
	f.Len(registerEvals, 2, "job expected to have two evals")

	// seenEvals tracks the evaluations we have tested for priority equality
	// against our expected value. This allows us to easily perform multiple
	// checks with confidence.
	seenEvals := map[string]bool{}

	// All evaluations should have the priority set to the overridden priority.
	for _, eval := range registerEvals {
		f.Equal(80, eval.Priority)
		seenEvals[eval.ID] = true
	}

	// Update the job image and set an eval priority lower than the job
	// priority.
	f.NoError(e2eutil.RegisterWithArgs(jobID, "eval_priority/inputs/thirteen_job_priority.nomad",
		"-eval-priority=7", "-var", "image=busybox:1.34"))
	f.NoError(e2eutil.WaitForLastDeploymentStatus(jobID, "default", "successful",
		&e2eutil.WaitConfig{Retries: 200}))

	// Pull the latest list of evaluations for the job which will include those
	// as a result of the job update.
	updateEvals, _, err := tc.Nomad().Jobs().Evaluations(jobID, nil)
	f.NoError(err)
	f.NotNil(updateEvals, "expected non-nil evaluation list response")
	f.NotEmpty(updateEvals, "expected non-empty evaluation list response")

	// Iterate the evals, ignoring those we have already seen and check their
	// priority is as expected.
	for _, eval := range updateEvals {
		if ok := seenEvals[eval.ID]; ok {
			continue
		}
		f.Equal(7, eval.Priority)
		seenEvals[eval.ID] = true
	}

	// Deregister the job using an increased priority.
	deregOpts := api.DeregisterOptions{EvalPriority: 100, Purge: true}
	deregEvalID, _, err := tc.Nomad().Jobs().DeregisterOpts(jobID, &deregOpts, nil)
	f.NoError(err)
	f.NotEmpty(deregEvalID, "expected non-empty evaluation ID")

	// Detail the deregistration evaluation and check its priority.
	evalInfo, _, err := tc.Nomad().Evaluations().Info(deregEvalID, nil)
	f.NoError(err)
	f.Equal(100, evalInfo.Priority)

	// If the job was successfully purged, clear the test suite state.
	if err == nil {
		tc.jobIDs = []string{}
	}
}

// TestEvalPriorityNotSet performs a test which registers, updates, and
// deregisters a job, never setting the eval priority.
func (tc *EvalPriorityTest) TestEvalPriorityNotSet(f *framework.F) {

	// Generate a jobID and attempt to register the job. In case there is a
	// problem found here and the job registers, we need to ensure it gets
	// cleaned up.
	jobID := "test-eval-priority-" + uuid.Generate()[0:8]
	f.NoError(e2eutil.Register(jobID, "eval_priority/inputs/thirteen_job_priority.nomad"))
	tc.jobIDs = append(tc.jobIDs, jobID)

	// Wait for the deployment to finish.
	f.NoError(e2eutil.WaitForLastDeploymentStatus(jobID, "default", "successful", nil))

	// Pull the job evaluation list from the API and ensure that this didn't
	// error and contains two evals.
	//
	// Eval 1: the job registration eval.
	// Eval 2: the deployment watcher eval.
	registerEvals, _, err := tc.Nomad().Jobs().Evaluations(jobID, nil)
	f.NoError(err)
	f.Len(registerEvals, 2, "job expected to have two evals")

	// seenEvals tracks the evaluations we have tested for priority equality
	// against our expected value. This allows us to easily perform multiple
	// checks with confidence.
	seenEvals := map[string]bool{}

	// All evaluations should have the priority set to the job priority.
	for _, eval := range registerEvals {
		f.Equal(13, eval.Priority)
		seenEvals[eval.ID] = true
	}

	// Update the job image without setting an eval priority.
	f.NoError(e2eutil.RegisterWithArgs(jobID, "eval_priority/inputs/thirteen_job_priority.nomad",
		"-var", "image=busybox:1.34"))
	f.NoError(e2eutil.WaitForLastDeploymentStatus(jobID, "default", "successful",
		&e2eutil.WaitConfig{Retries: 200}))

	// Pull the latest list of evaluations for the job which will include those
	// as a result of the job update.
	updateEvals, _, err := tc.Nomad().Jobs().Evaluations(jobID, nil)
	f.NoError(err)
	f.NotNil(updateEvals, "expected non-nil evaluation list response")
	f.NotEmpty(updateEvals, "expected non-empty evaluation list response")

	// Iterate the evals, ignoring those we have already seen and check their
	// priority is as expected.
	for _, eval := range updateEvals {
		if ok := seenEvals[eval.ID]; ok {
			continue
		}
		f.Equal(13, eval.Priority)
		seenEvals[eval.ID] = true
	}

	// Deregister the job without setting an eval priority.
	deregOpts := api.DeregisterOptions{Purge: true}
	deregEvalID, _, err := tc.Nomad().Jobs().DeregisterOpts(jobID, &deregOpts, nil)
	f.NoError(err)
	f.NotEmpty(deregEvalID, "expected non-empty evaluation ID")

	// Detail the deregistration evaluation and check its priority.
	evalInfo, _, err := tc.Nomad().Evaluations().Info(deregEvalID, nil)
	f.NoError(err)
	f.Equal(13, evalInfo.Priority)

	// If the job was successfully purged, clear the test suite state.
	if err == nil {
		tc.jobIDs = []string{}
	}
}
```
e2e/eval_priority/inputs/thirteen_job_priority.nomad (new file, 20 lines)

```hcl
variable "image" { default = "busybox:1" }

job "networking" {
  datacenters = ["dc1", "dc2"]
  priority    = 13

  constraint {
    attribute = "${attr.kernel.name}"
    value     = "linux"
  }

  group "bridged" {
    task "sleep" {
      driver = "docker"
      config {
        image   = var.image
        command = "/bin/sleep"
        args    = ["300"]
      }
    }
  }
}
```
nomad/deploymentwatcher/deployment_watcher.go (path inferred)

```diff
@@ -824,10 +824,20 @@ func (w *deploymentWatcher) createBatchedUpdate(allowReplacements []string, forI
 // getEval returns an evaluation suitable for the deployment
 func (w *deploymentWatcher) getEval() *structs.Evaluation {
 	now := time.Now().UTC().UnixNano()
+
+	// During a server upgrade it's possible we end up with deployments created
+	// on the previous version that are then "watched" on a leader that's on
+	// the new version. This would result in an eval with its priority set to
+	// zero which would be bad. This therefore protects against that.
+	priority := w.d.EvalPriority
+	if priority == 0 {
+		priority = w.j.Priority
+	}
+
 	return &structs.Evaluation{
 		ID:          uuid.Generate(),
 		Namespace:   w.j.Namespace,
-		Priority:    w.j.Priority,
+		Priority:    priority,
 		Type:        w.j.Type,
 		TriggeredBy: structs.EvalTriggerDeploymentWatcher,
 		JobID:       w.j.ID,
```
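A standalone sketch of the fallback logic above, for clarity (the function and names here are illustrative, not the watcher code itself):

```go
package main

import "fmt"

// evalPriority illustrates the upgrade guard used by the deployment watcher:
// deployments written by a pre-upgrade server carry a zero EvalPriority, in
// which case the job priority is used instead of emitting a priority-zero eval.
func evalPriority(deploymentEvalPriority, jobPriority int) int {
	if deploymentEvalPriority == 0 {
		return jobPriority
	}
	return deploymentEvalPriority
}

func main() {
	fmt.Println(evalPriority(0, 50))  // pre-upgrade deployment -> 50
	fmt.Println(evalPriority(90, 50)) // recorded override -> 90
}
```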
nomad/job_endpoint.go (path inferred)

```diff
@@ -357,10 +357,18 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
 
 	// If the job is periodic or parameterized, we don't create an eval.
 	if !(args.Job.IsPeriodic() || args.Job.IsParameterized()) {
+
+		// Initially set the eval priority to that of the job priority. If the
+		// user supplied an eval priority override, we subsequently use this.
+		evalPriority := args.Job.Priority
+		if args.EvalPriority > 0 {
+			evalPriority = args.EvalPriority
+		}
+
 		eval = &structs.Evaluation{
 			ID:          uuid.Generate(),
 			Namespace:   args.RequestNamespace(),
-			Priority:    args.Job.Priority,
+			Priority:    evalPriority,
 			Type:        args.Job.Type,
 			TriggeredBy: structs.EvalTriggerJobRegister,
 			JobID:       args.Job.ID,
@@ -829,22 +837,23 @@ func (j *Job) Deregister(args *structs.JobDeregisterRequest, reply *structs.JobD
 	// priority even if the job was.
 	now := time.Now().UnixNano()
 
-	// Set our default priority initially, but update this to that configured
-	// within the job if possible. It is reasonable from a user perspective
-	// that jobs with a higher priority have their deregister evaluated before
-	// those of a lower priority.
-	//
-	// Alternatively, the previous behaviour was to set the eval priority to
-	// the default value. Jobs with a lower than default register priority
-	// would therefore have their deregister eval priorities higher than
-	// expected.
-	priority := structs.JobDefaultPriority
-	if job != nil {
-		priority = job.Priority
-	}
-
 	// If the job is periodic or parameterized, we don't create an eval.
 	if job == nil || !(job.IsPeriodic() || job.IsParameterized()) {
+
+		// The evaluation priority is determined by several factors. It
+		// defaults to the job default priority and is overridden by the
+		// priority set on the job specification.
+		//
+		// If the user supplied an eval priority override, we subsequently
+		// use this.
+		priority := structs.JobDefaultPriority
+		if job != nil {
+			priority = job.Priority
+		}
+		if args.EvalPriority > 0 {
+			priority = args.EvalPriority
+		}
+
 		eval = &structs.Evaluation{
 			ID:        uuid.Generate(),
 			Namespace: args.RequestNamespace(),
```
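To make the three-way resolution in Deregister explicit, a small illustrative sketch (names here are assumptions, not the server code; `structs.JobDefaultPriority` is 50):

```go
package main

import "fmt"

const jobDefaultPriority = 50 // mirrors structs.JobDefaultPriority

// deregisterEvalPriority illustrates the resolution order in Job.Deregister:
// start from the default, prefer the job's own priority when the job still
// exists, and let a user-supplied override win over both.
func deregisterEvalPriority(jobPriority *int, override int) int {
	priority := jobDefaultPriority
	if jobPriority != nil {
		priority = *jobPriority
	}
	if override > 0 {
		priority = override
	}
	return priority
}

func main() {
	p := 13
	fmt.Println(deregisterEvalPriority(nil, 0)) // 50: job already gone, no override
	fmt.Println(deregisterEvalPriority(&p, 0))  // 13: job priority
	fmt.Println(deregisterEvalPriority(&p, 99)) // 99: override wins
}
```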
nomad/job_endpoint_test.go (path inferred)

```diff
@@ -169,6 +169,38 @@ func TestJobEndpoint_Register_PreserveCounts(t *testing.T) {
 	require.Equal(2, out.TaskGroups[1].Count) // should be as in job spec
 }
 
+func TestJobEndpoint_Register_EvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	s1, cleanupS1 := TestServer(t, func(c *Config) { c.NumSchedulers = 0 })
+	defer cleanupS1()
+	codec := rpcClient(t, s1)
+	testutil.WaitForLeader(t, s1.RPC)
+
+	// Create the register request
+	job := mock.Job()
+	job.TaskGroups[0].Name = "group1"
+	job.Canonicalize()
+
+	// Register the job.
+	requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
+		Job: job,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+		EvalPriority: 99,
+	}, &structs.JobRegisterResponse{}))
+
+	// Grab the eval from the state, and check its priority is as expected.
+	state := s1.fsm.State()
+	out, err := state.EvalsByJob(nil, job.Namespace, job.ID)
+	requireAssert.NoError(err)
+	requireAssert.Len(out, 1)
+	requireAssert.Equal(99, out[0].Priority)
+}
+
 func TestJobEndpoint_Register_Connect(t *testing.T) {
 	t.Parallel()
 	require := require.New(t)
@@ -3365,6 +3397,48 @@ func TestJobEndpoint_Deregister_Nonexistent(t *testing.T) {
 	}
 }
 
+func TestJobEndpoint_Deregister_EvalPriority(t *testing.T) {
+	t.Parallel()
+	requireAssert := require.New(t)
+
+	s1, cleanupS1 := TestServer(t, func(c *Config) {
+		c.NumSchedulers = 0 // Prevent automatic dequeue
+	})
+	defer cleanupS1()
+	codec := rpcClient(t, s1)
+	testutil.WaitForLeader(t, s1.RPC)
+
+	// Create the register request
+	job := mock.Job()
+	job.Canonicalize()
+
+	// Register the job.
+	requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Register", &structs.JobRegisterRequest{
+		Job: job,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+	}, &structs.JobRegisterResponse{}))
+
+	// Create the deregister request.
+	deregReq := &structs.JobDeregisterRequest{
+		JobID: job.ID,
+		WriteRequest: structs.WriteRequest{
+			Region:    "global",
+			Namespace: job.Namespace,
+		},
+		EvalPriority: 99,
+	}
+	var deregResp structs.JobDeregisterResponse
+	requireAssert.NoError(msgpackrpc.CallWithCodec(codec, "Job.Deregister", deregReq, &deregResp))
+
+	// Grab the eval from the state, and check its priority is as expected.
+	out, err := s1.fsm.State().EvalByID(nil, deregResp.EvalID)
+	requireAssert.NoError(err)
+	requireAssert.Equal(99, out.Priority)
+}
+
 func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
 	t.Parallel()
```
nomad/state/state_store_test.go (path inferred)

```diff
@@ -6781,7 +6781,7 @@ func TestStateStore_UpsertDeploymentStatusUpdate_Successful(t *testing.T) {
 	}
 
 	// Insert a deployment
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	if err := state.UpsertDeployment(2, d); err != nil {
 		t.Fatalf("bad: %v", err)
 	}
```
nomad/structs/structs.go (path inferred)

```diff
@@ -586,6 +586,14 @@ type JobRegisterRequest struct {
 	// PolicyOverride is set when the user is attempting to override any policies
 	PolicyOverride bool
 
+	// EvalPriority is an optional priority to use on any evaluation created as
+	// a result of this job registration. This value must be between 1-100
+	// inclusively, where a larger value corresponds to a higher priority. This
+	// is useful when an operator wishes to push through a job registration in
+	// busy clusters with a large evaluation backlog. This avoids needing to
+	// change the job priority which also impacts preemption.
+	EvalPriority int
+
 	// Eval is the evaluation that is associated with the job registration
 	Eval *Evaluation
 
@@ -606,6 +614,13 @@ type JobDeregisterRequest struct {
 	// deregistered. It is ignored for single-region jobs.
 	Global bool
 
+	// EvalPriority is an optional priority to use on any evaluation created as
+	// a result of this job deregistration. This value must be between 1-100
+	// inclusively, where a larger value corresponds to a higher priority. This
+	// is useful when an operator wishes to push through a job deregistration
+	// in busy clusters with a large evaluation backlog.
+	EvalPriority int
+
 	// Eval is the evaluation to create that's associated with job deregister
 	Eval *Evaluation
 
@@ -8847,12 +8862,18 @@ type Deployment struct {
 	// status.
 	StatusDescription string
 
+	// EvalPriority tracks the priority of the evaluation which led to the
+	// creation of this Deployment object. Any additional evaluations created
+	// as a result of this deployment can therefore inherit this value, which
+	// is not guaranteed to be that of the job priority parameter.
+	EvalPriority int
+
 	CreateIndex uint64
 	ModifyIndex uint64
 }
 
 // NewDeployment creates a new deployment given the job.
-func NewDeployment(job *Job) *Deployment {
+func NewDeployment(job *Job, evalPriority int) *Deployment {
 	return &Deployment{
 		ID:        uuid.Generate(),
 		Namespace: job.Namespace,
@@ -8865,6 +8886,7 @@ func NewDeployment(job *Job) *Deployment {
 		Status:            DeploymentStatusRunning,
 		StatusDescription: DeploymentStatusDescriptionRunning,
 		TaskGroups:        make(map[string]*DeploymentState, len(job.TaskGroups)),
+		EvalPriority:      evalPriority,
 	}
 }
```
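Because NewDeployment gains a parameter, every caller must now supply an eval priority; a hypothetical call-site shape in the scheduler (names assumed from the reconciler context below, not shown in this commit excerpt):

```go
// The scheduler threads the triggering evaluation's priority into the new
// deployment so that watcher-created evals can later inherit it.
a.deployment = structs.NewDeployment(a.job, a.evalPriority)
```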
@ -351,7 +351,7 @@ func (s *GenericScheduler) computeJobAllocs() error {
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(s.logger,
|
reconciler := NewAllocReconciler(s.logger,
|
||||||
genericAllocUpdateFn(s.ctx, s.stack, s.eval.ID),
|
genericAllocUpdateFn(s.ctx, s.stack, s.eval.ID),
|
||||||
s.batch, s.eval.JobID, s.job, s.deployment, allocs, tainted, s.eval.ID)
|
s.batch, s.eval.JobID, s.job, s.deployment, allocs, tainted, s.eval.ID, s.eval.Priority)
|
||||||
results := reconciler.Compute()
|
results := reconciler.Compute()
|
||||||
s.logger.Debug("reconciled current state with desired state", "results", log.Fmt("%#v", results))
|
s.logger.Debug("reconciled current state with desired state", "results", log.Fmt("%#v", results))
|
||||||
|
|
||||||
|
|
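The scheduler simply forwards s.eval.Priority, so whatever priority the triggering evaluation already carries is what the reconciler sees. A sketch of the selection presumed to happen upstream, under the assumption that an unset (zero) eval priority falls back to the job priority; the function name is illustrative only:

```go
package main

import "fmt"

// effectiveEvalPriority sketches the assumed upstream choice: an explicit
// eval priority wins, otherwise the evaluation takes the job priority.
func effectiveEvalPriority(jobPriority, requestedEvalPriority int) int {
	if requestedEvalPriority > 0 {
		return requestedEvalPriority
	}
	return jobPriority
}

func main() {
	fmt.Println(effectiveEvalPriority(50, 0))  // 50: no override supplied
	fmt.Println(effectiveEvalPriority(50, 90)) // 90: operator override
}
```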
@@ -73,8 +73,10 @@ type allocReconciler struct {
 	// existingAllocs is non-terminal existing allocations
 	existingAllocs []*structs.Allocation

-	// evalID is the ID of the evaluation that triggered the reconciler
+	// evalID and evalPriority are the ID and priority of the evaluation that
+	// triggered the reconciler.
 	evalID       string
+	evalPriority int

 	// now is the time used when determining rescheduling eligibility
 	// defaults to time.Now, and overridden in unit tests
@@ -160,7 +162,8 @@ func (r *reconcileResults) Changes() int {
 // the changes required to bring the cluster state in line with the declared jobspec
 func NewAllocReconciler(logger log.Logger, allocUpdateFn allocUpdateType, batch bool,
 	jobID string, job *structs.Job, deployment *structs.Deployment,
-	existingAllocs []*structs.Allocation, taintedNodes map[string]*structs.Node, evalID string) *allocReconciler {
+	existingAllocs []*structs.Allocation, taintedNodes map[string]*structs.Node, evalID string,
+	evalPriority int) *allocReconciler {
 	return &allocReconciler{
 		logger:        logger.Named("reconciler"),
 		allocUpdateFn: allocUpdateFn,
@@ -171,6 +174,7 @@ func NewAllocReconciler(logger log.Logger, allocUpdateFn allocUpdateType, batch
 		existingAllocs: existingAllocs,
 		taintedNodes:   taintedNodes,
 		evalID:         evalID,
+		evalPriority:   evalPriority,
 		now:            time.Now(),
 		result: &reconcileResults{
 			desiredTGUpdates: make(map[string]*structs.DesiredUpdates),
@@ -555,7 +559,7 @@ func (a *allocReconciler) computeGroup(group string, all allocSet) bool {
 	if !existingDeployment && !strategy.IsEmpty() && dstate.DesiredTotal != 0 && (!hadRunning || updatingSpec) {
 		// A previous group may have made the deployment already
 		if a.deployment == nil {
-			a.deployment = structs.NewDeployment(a.job)
+			a.deployment = structs.NewDeployment(a.job, a.evalPriority)
 			// in multiregion jobs, most deployments start in a pending state
 			if a.job.IsMultiregion() && !(a.job.IsPeriodic() && a.job.IsParameterized()) {
 				a.deployment.Status = structs.DeploymentStatusPending
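Taken together, the reconciler changes above mean the evaluation's priority, not the job's, is what gets stamped onto lazily created deployments and carried forward from there. A self-contained sketch of that propagation chain, using stand-in types rather than Nomad code:

```go
package main

import "fmt"

// Stand-in types; the real structs live in the nomad/structs package.
type evaluation struct{ priority int }
type deployment struct{ evalPriority int }

type reconciler struct {
	evalPriority int
	deployment   *deployment
}

// newReconciler captures the priority of the triggering evaluation,
// mirroring the extra evalPriority constructor argument above.
func newReconciler(triggering evaluation) *reconciler {
	return &reconciler{evalPriority: triggering.priority}
}

func (r *reconciler) compute() evaluation {
	// A lazily created deployment inherits the eval priority, as in
	// computeGroup above.
	if r.deployment == nil {
		r.deployment = &deployment{evalPriority: r.evalPriority}
	}
	// Follow-up evaluations keep the same priority, so one register or
	// deregister request stays at one consistent eval priority throughout.
	return evaluation{priority: r.evalPriority}
}

func main() {
	followUp := newReconciler(evaluation{priority: 90}).compute()
	fmt.Println(followUp.priority) // 90
}
```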
@@ -942,7 +946,7 @@ func (a *allocReconciler) handleDelayedLost(rescheduleLater []*delayedReschedule
 	eval := &structs.Evaluation{
 		ID:          uuid.Generate(),
 		Namespace:   a.job.Namespace,
-		Priority:    a.job.Priority,
+		Priority:    a.evalPriority,
 		Type:        a.job.Type,
 		TriggeredBy: structs.EvalTriggerRetryFailedAlloc,
 		JobID:       a.job.ID,
@@ -963,7 +967,7 @@ func (a *allocReconciler) handleDelayedLost(rescheduleLater []*delayedReschedule
 	eval = &structs.Evaluation{
 		ID:          uuid.Generate(),
 		Namespace:   a.job.Namespace,
-		Priority:    a.job.Priority,
+		Priority:    a.evalPriority,
 		Type:        a.job.Type,
 		TriggeredBy: structs.EvalTriggerRetryFailedAlloc,
 		JobID:       a.job.ID,
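Both follow-up evaluations created for delayed rescheduling previously dropped back to the job priority; they now reuse the reconciler's eval priority. A condensed, runnable before/after, with everything except the two field names standing in for real code:

```go
package main

import "fmt"

// allocReconciler is a stand-in carrying only the two priorities the
// hunks above choose between.
type allocReconciler struct {
	jobPriority  int
	evalPriority int
}

func (a *allocReconciler) followUpEvalPriority() int {
	// Old behaviour: return a.jobPriority, so a follow-up eval reverted
	// to the job priority (e.g. 50) even when the triggering eval was
	// created with a higher one.
	// New behaviour: keep the triggering evaluation's priority.
	return a.evalPriority
}

func main() {
	a := &allocReconciler{jobPriority: 50, evalPriority: 90}
	fmt.Println(a.followUpEvalPriority()) // 90
}
```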
@@ -262,7 +262,7 @@ type resultExpectation struct {
 func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) {
 	t.Helper()
-	assert := assert.New(t)
+	assertion := assert.New(t)

 	if exp.createDeployment != nil && r.deployment == nil {
 		t.Errorf("Expect a created deployment got none")
@@ -277,20 +277,22 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) {
 		}
 	}

-	assert.EqualValues(exp.deploymentUpdates, r.deploymentUpdates, "Expected Deployment Updates")
-	assert.Len(r.place, exp.place, "Expected Placements")
-	assert.Len(r.destructiveUpdate, exp.destructive, "Expected Destructive")
-	assert.Len(r.inplaceUpdate, exp.inplace, "Expected Inplace Updates")
-	assert.Len(r.attributeUpdates, exp.attributeUpdates, "Expected Attribute Updates")
-	assert.Len(r.stop, exp.stop, "Expected Stops")
-	assert.EqualValues(exp.desiredTGUpdates, r.desiredTGUpdates, "Expected Desired TG Update Annotations")
+	assertion.EqualValues(exp.deploymentUpdates, r.deploymentUpdates, "Expected Deployment Updates")
+	assertion.Len(r.place, exp.place, "Expected Placements")
+	assertion.Len(r.destructiveUpdate, exp.destructive, "Expected Destructive")
+	assertion.Len(r.inplaceUpdate, exp.inplace, "Expected Inplace Updates")
+	assertion.Len(r.attributeUpdates, exp.attributeUpdates, "Expected Attribute Updates")
+	assertion.Len(r.stop, exp.stop, "Expected Stops")
+	assertion.EqualValues(exp.desiredTGUpdates, r.desiredTGUpdates, "Expected Desired TG Update Annotations")
 }

 // Tests the reconciler properly handles placements for a job that has no
 // existing allocations
 func TestReconciler_Place_NoExisting(t *testing.T) {
 	job := mock.Job()
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, nil, nil, "")
+	reconciler := NewAllocReconciler(
+		testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
+		nil, nil, nil, "", job.Priority)
 	r := reconciler.Compute()

 	// Assert the correct results
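The assert-to-assertion rename presumably avoids shadowing the imported testify package name inside assertResults; once a local variable is named assert, the package's own helpers become unreachable below it. A minimal sketch of the hazard, assuming the testify module is available:

```go
package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestShadowing(t *testing.T) {
	// Naming this variable "assert" would shadow the package import,
	// so e.g. assert.New could no longer be referenced afterwards.
	assertion := assert.New(t)
	assertion.Equal(1, 1, "values should match")
}
```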
|
@ -326,7 +328,8 @@ func TestReconciler_Place_Existing(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -364,7 +367,8 @@ func TestReconciler_ScaleDown_Partial(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -403,7 +407,8 @@ func TestReconciler_ScaleDown_Zero(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -443,7 +448,8 @@ func TestReconciler_ScaleDown_Zero_DuplicateNames(t *testing.T) {
|
||||||
expectedStopped = append(expectedStopped, i%2)
|
expectedStopped = append(expectedStopped, i%2)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -478,7 +484,8 @@ func TestReconciler_Inplace(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -516,7 +523,8 @@ func TestReconciler_Inplace_ScaleUp(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -556,7 +564,8 @@ func TestReconciler_Inplace_ScaleDown(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -618,7 +627,7 @@ func TestReconciler_Inplace_Rollback(t *testing.T) {
|
||||||
}, allocUpdateFnDestructive)
|
}, allocUpdateFnDestructive)
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFn,
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFn,
|
||||||
false, job.ID, job, nil, allocs, nil, uuid.Generate())
|
false, job.ID, job, nil, allocs, nil, uuid.Generate(), 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -661,7 +670,8 @@ func TestReconciler_Destructive(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -694,7 +704,8 @@ func TestReconciler_DestructiveMaxParallel(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -730,7 +741,8 @@ func TestReconciler_Destructive_ScaleUp(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -769,7 +781,8 @@ func TestReconciler_Destructive_ScaleDown(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -814,7 +827,8 @@ func TestReconciler_LostNode(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -864,7 +878,8 @@ func TestReconciler_LostNode_ScaleUp(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -914,7 +929,8 @@ func TestReconciler_LostNode_ScaleDown(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -959,7 +975,8 @@ func TestReconciler_DrainNode(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1011,7 +1028,8 @@ func TestReconciler_DrainNode_ScaleUp(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1064,7 +1082,8 @@ func TestReconciler_DrainNode_ScaleDown(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1109,7 +1128,8 @@ func TestReconciler_RemovedTG(t *testing.T) {
|
||||||
newName := "different"
|
newName := "different"
|
||||||
job.TaskGroups[0].Name = newName
|
job.TaskGroups[0].Name = newName
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1171,7 +1191,8 @@ func TestReconciler_JobStopped(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1237,7 +1258,8 @@ func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
require.Len(t, r.stop, 0)
|
require.Len(t, r.stop, 0)
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1273,7 +1295,8 @@ func TestReconciler_MultiTG(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1320,12 +1343,13 @@ func TestReconciler_MultiTG_SingleUpdateStanza(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -1401,7 +1425,8 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) {
|
||||||
// Mark one as complete
|
// Mark one as complete
|
||||||
allocs[5].ClientStatus = structs.AllocClientStatusComplete
|
allocs[5].ClientStatus = structs.AllocClientStatusComplete
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job, nil, allocs, nil, uuid.Generate())
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job,
|
||||||
|
nil, allocs, nil, uuid.Generate(), 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Two reschedule attempts were already made, one more can be made at a future time
|
// Two reschedule attempts were already made, one more can be made at a future time
|
||||||
|
@ -1481,7 +1506,8 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) {
|
||||||
FinishedAt: now.Add(10 * time.Second)}}
|
FinishedAt: now.Add(10 * time.Second)}}
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job, nil, allocs, nil, uuid.Generate())
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job,
|
||||||
|
nil, allocs, nil, uuid.Generate(), 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Verify that two follow up evals were created
|
// Verify that two follow up evals were created
|
||||||
|
@ -1575,7 +1601,8 @@ func TestReconciler_RescheduleNow_Batch(t *testing.T) {
|
||||||
// Mark one as complete
|
// Mark one as complete
|
||||||
allocs[5].ClientStatus = structs.AllocClientStatusComplete
|
allocs[5].ClientStatus = structs.AllocClientStatusComplete
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
reconciler.now = now
|
reconciler.now = now
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
|
@ -1650,7 +1677,8 @@ func TestReconciler_RescheduleLater_Service(t *testing.T) {
|
||||||
// Mark one as desired state stop
|
// Mark one as desired state stop
|
||||||
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, uuid.Generate())
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, uuid.Generate(), 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Should place a new placement and create a follow up eval for the delayed reschedule
|
// Should place a new placement and create a follow up eval for the delayed reschedule
|
||||||
|
@ -1719,7 +1747,8 @@ func TestReconciler_Service_ClientStatusComplete(t *testing.T) {
|
||||||
// Mark one as client status complete
|
// Mark one as client status complete
|
||||||
allocs[4].ClientStatus = structs.AllocClientStatusComplete
|
allocs[4].ClientStatus = structs.AllocClientStatusComplete
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Should place a new placement for the alloc that was marked complete
|
// Should place a new placement for the alloc that was marked complete
|
||||||
|
@ -1775,7 +1804,8 @@ func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) {
|
||||||
allocs[4].ClientStatus = structs.AllocClientStatusFailed
|
allocs[4].ClientStatus = structs.AllocClientStatusFailed
|
||||||
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Should place a new placement for the alloc that was marked stopped
|
// Should place a new placement for the alloc that was marked stopped
|
||||||
|
@ -1852,7 +1882,8 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) {
|
||||||
// Mark one as desired state stop
|
// Mark one as desired state stop
|
||||||
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Verify that no follow up evals were created
|
// Verify that no follow up evals were created
|
||||||
|
@ -1930,7 +1961,8 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) {
|
||||||
FinishedAt: now.Add(-4 * time.Second)}}
|
FinishedAt: now.Add(-4 * time.Second)}}
|
||||||
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
reconciler.now = now
|
reconciler.now = now
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
|
@ -2011,7 +2043,8 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) {
|
||||||
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
allocs[1].ClientStatus = structs.AllocClientStatusFailed
|
||||||
allocs[1].FollowupEvalID = evalID
|
allocs[1].FollowupEvalID = evalID
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, evalID)
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, evalID, 50)
|
||||||
reconciler.now = now.Add(-30 * time.Second)
|
reconciler.now = now.Add(-30 * time.Second)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
|
@ -2065,7 +2098,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) {
|
||||||
job2 := job.Copy()
|
job2 := job.Copy()
|
||||||
job2.Version++
|
job2.Version++
|
||||||
|
|
||||||
d := structs.NewDeployment(job2)
|
d := structs.NewDeployment(job2, 50)
|
||||||
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -2120,7 +2153,8 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Verify that no follow up evals were created
|
// Verify that no follow up evals were created
|
||||||
|
@ -2171,7 +2205,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) {
|
||||||
job2 := job.Copy()
|
job2 := job.Copy()
|
||||||
job2.Version++
|
job2.Version++
|
||||||
|
|
||||||
d := structs.NewDeployment(job2)
|
d := structs.NewDeployment(job2, 50)
|
||||||
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -2243,7 +2277,8 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
reconciler.now = now
|
reconciler.now = now
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
|
@ -2298,7 +2333,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) {
|
||||||
job2 := job.Copy()
|
job2 := job.Copy()
|
||||||
job2.Version++
|
job2.Version++
|
||||||
|
|
||||||
d := structs.NewDeployment(job2)
|
d := structs.NewDeployment(job2, 50)
|
||||||
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
d.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -2370,7 +2405,8 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job2,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
reconciler.now = now
|
reconciler.now = now
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
|
@ -2435,7 +2471,8 @@ func TestReconciler_DontReschedule_PreviouslyRescheduled(t *testing.T) {
|
||||||
// Mark one as desired state stop
|
// Mark one as desired state stop
|
||||||
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Should place 1 - one is a new placement to make up the desired count of 5
|
// Should place 1 - one is a new placement to make up the desired count of 5
|
||||||
|
@ -2463,8 +2500,8 @@ func TestReconciler_CancelDeployment_JobStop(t *testing.T) {
|
||||||
job := mock.Job()
|
job := mock.Job()
|
||||||
job.Stop = true
|
job.Stop = true
|
||||||
|
|
||||||
running := structs.NewDeployment(job)
|
running := structs.NewDeployment(job, 50)
|
||||||
failed := structs.NewDeployment(job)
|
failed := structs.NewDeployment(job, 50)
|
||||||
failed.Status = structs.DeploymentStatusFailed
|
failed.Status = structs.DeploymentStatusFailed
|
||||||
|
|
||||||
cases := []struct {
|
cases := []struct {
|
||||||
|
@ -2522,7 +2559,8 @@ func TestReconciler_CancelDeployment_JobStop(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job, c.deployment, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job,
|
||||||
|
c.deployment, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
var updates []*structs.DeploymentStatusUpdate
|
var updates []*structs.DeploymentStatusUpdate
|
||||||
|
@ -2561,8 +2599,8 @@ func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) {
|
||||||
job := mock.Job()
|
job := mock.Job()
|
||||||
|
|
||||||
// Create two deployments
|
// Create two deployments
|
||||||
running := structs.NewDeployment(job)
|
running := structs.NewDeployment(job, 50)
|
||||||
failed := structs.NewDeployment(job)
|
failed := structs.NewDeployment(job, 50)
|
||||||
failed.Status = structs.DeploymentStatusFailed
|
failed.Status = structs.DeploymentStatusFailed
|
||||||
|
|
||||||
// Make the job newer than the deployment
|
// Make the job newer than the deployment
|
||||||
|
@ -2599,7 +2637,8 @@ func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, c.deployment, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
c.deployment, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
var updates []*structs.DeploymentStatusUpdate
|
var updates []*structs.DeploymentStatusUpdate
|
||||||
|
@ -2648,10 +2687,11 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
}
|
}
|
||||||
|
@ -2691,10 +2731,11 @@ func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnInplace, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
}
|
}
|
||||||
|
@ -2733,10 +2774,11 @@ func TestReconciler_CreateDeployment_NewerCreateIndex(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredTotal: 5,
|
DesiredTotal: 5,
|
||||||
}
|
}
|
||||||
|
@ -2777,7 +2819,8 @@ func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -2822,7 +2865,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
// Create a deployment that is paused/failed and has placed some canaries
|
// Create a deployment that is paused/failed and has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.Status = c.deploymentStatus
|
d.Status = c.deploymentStatus
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
|
@ -2855,7 +2898,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) {
|
||||||
d.TaskGroups[canary.TaskGroup].PlacedCanaries = []string{canary.ID}
|
d.TaskGroups[canary.TaskGroup].PlacedCanaries = []string{canary.ID}
|
||||||
|
|
||||||
mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{canary.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
|
mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{canary.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -2900,7 +2944,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) {
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
// Create a deployment that is paused and has placed some canaries
|
// Create a deployment that is paused and has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.Status = c.deploymentStatus
|
d.Status = c.deploymentStatus
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
|
@ -2920,7 +2964,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -2963,7 +3008,7 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing
|
||||||
for _, c := range cases {
|
for _, c := range cases {
|
||||||
t.Run(c.name, func(t *testing.T) {
|
t.Run(c.name, func(t *testing.T) {
|
||||||
// Create a deployment that is paused and has placed some canaries
|
// Create a deployment that is paused and has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
d.Status = c.deploymentStatus
|
d.Status = c.deploymentStatus
|
||||||
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
|
@ -2994,7 +3039,8 @@ func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing
|
||||||
allocs = append(allocs, newAlloc)
|
allocs = append(allocs, newAlloc)
|
||||||
|
|
||||||
mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{newAlloc.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
|
mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{newAlloc.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -3020,7 +3066,7 @@ func TestReconciler_DrainNode_Canary(t *testing.T) {
|
||||||
job.TaskGroups[0].Update = canaryUpdate
|
job.TaskGroups[0].Update = canaryUpdate
|
||||||
|
|
||||||
// Create a deployment that is paused and has placed some canaries
|
// Create a deployment that is paused and has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
|
@ -3065,7 +3111,8 @@ func TestReconciler_DrainNode_Canary(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
|
|
||||||
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
|
||||||
|
d, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -3092,7 +3139,7 @@ func TestReconciler_LostNode_Canary(t *testing.T) {
|
||||||
job.TaskGroups[0].Update = canaryUpdate
|
job.TaskGroups[0].Update = canaryUpdate
|
||||||
|
|
||||||
// Create a deployment that is paused and has placed some canaries
|
// Create a deployment that is paused and has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
|
@ -3137,7 +3184,8 @@ func TestReconciler_LostNode_Canary(t *testing.T) {
|
||||||
tainted[n.ID] = n
|
tainted[n.ID] = n
|
||||||
|
|
||||||
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, tainted, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
|
||||||
|
d, allocs, tainted, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -3165,7 +3213,7 @@ func TestReconciler_StopOldCanaries(t *testing.T) {
|
||||||
job.TaskGroups[0].Update = canaryUpdate
|
job.TaskGroups[0].Update = canaryUpdate
|
||||||
|
|
||||||
// Create an old deployment that has placed some canaries
|
// Create an old deployment that has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
|
@ -3203,10 +3251,11 @@ func TestReconciler_StopOldCanaries(t *testing.T) {
|
||||||
allocs = append(allocs, canary)
|
allocs = append(allocs, canary)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d,
|
||||||
|
allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -3256,10 +3305,11 @@ func TestReconciler_NewCanaries(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -3304,10 +3354,11 @@ func TestReconciler_NewCanaries_CountGreater(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
state := &structs.DeploymentState{
|
state := &structs.DeploymentState{
|
||||||
DesiredCanaries: 7,
|
DesiredCanaries: 7,
|
||||||
|
@ -3355,10 +3406,11 @@ func TestReconciler_NewCanaries_MultiTG(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
state := &structs.DeploymentState{
|
state := &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -3408,10 +3460,11 @@ func TestReconciler_NewCanaries_ScaleUp(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -3456,10 +3509,11 @@ func TestReconciler_NewCanaries_ScaleDown(t *testing.T) {
|
||||||
allocs = append(allocs, alloc)
|
allocs = append(allocs, alloc)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
nil, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
newD := structs.NewDeployment(job)
|
newD := structs.NewDeployment(job, 50)
|
||||||
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
|
||||||
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
|
||||||
DesiredCanaries: 2,
|
DesiredCanaries: 2,
|
||||||
|
@ -3498,7 +3552,7 @@ func TestReconciler_NewCanaries_FillNames(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create an existing deployment that has placed some canaries
|
// Create an existing deployment that has placed some canaries
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
Promoted: false,
|
Promoted: false,
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
|
@ -3533,7 +3587,8 @@ func TestReconciler_NewCanaries_FillNames(t *testing.T) {
|
||||||
allocs = append(allocs, canary)
|
allocs = append(allocs, canary)
|
||||||
}
|
}
|
||||||
|
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@ -3561,7 +3616,7 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) {
|
||||||
|
|
||||||
// Create an existing deployment that has placed some canaries and mark them
|
// Create an existing deployment that has placed some canaries and mark them
|
||||||
// promoted
|
// promoted
|
||||||
d := structs.NewDeployment(job)
|
d := structs.NewDeployment(job, 50)
|
||||||
s := &structs.DeploymentState{
|
s := &structs.DeploymentState{
|
||||||
Promoted: true,
|
Promoted: true,
|
||||||
DesiredTotal: 10,
|
DesiredTotal: 10,
|
||||||
|
@ -3602,7 +3657,8 @@ func TestReconciler_PromoteCanaries_Unblock(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
|
||||||
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
|
reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
|
||||||
|
d, allocs, nil, "", 50)
|
||||||
r := reconciler.Compute()
|
r := reconciler.Compute()
|
||||||
|
|
||||||
// Assert the correct results
|
// Assert the correct results
|
||||||
|
@@ -3634,7 +3690,7 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) {
 
 	// Create an existing deployment that has placed some canaries and mark them
 	// promoted
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	s := &structs.DeploymentState{
 		Promoted:     true,
 		DesiredTotal: 2,
@@ -3676,7 +3732,8 @@ func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) {
 	}
 
 	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	updates := []*structs.DeploymentStatusUpdate{
@@ -3736,7 +3793,7 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) {
 	t.Run(fmt.Sprintf("%d healthy", c.healthy), func(t *testing.T) {
 		// Create an existing deployment that has placed some canaries and mark them
 		// promoted
-		d := structs.NewDeployment(job)
+		d := structs.NewDeployment(job, 50)
 		d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 			Promoted:     true,
 			DesiredTotal: 10,
@@ -3775,7 +3832,8 @@ func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) {
 		}
 
 		mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-		reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
+		reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+			d, allocs, nil, "", 50)
 		r := reconciler.Compute()
 
 		// Assert the correct results
@@ -3805,7 +3863,7 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 	job.TaskGroups[0].Update = noCanaryUpdate
 
 	// Create an existing deployment that has some placed allocs
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted:     true,
 		DesiredTotal: 10,
@@ -3856,7 +3914,8 @@ func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
 	}
 
 	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, tainted, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+		d, allocs, tainted, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -3889,7 +3948,7 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) {
 	job.TaskGroups[0].Update = noCanaryUpdate
 
 	// Create an existing failed deployment that has some placed allocs
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusFailed
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: true,
@@ -3941,7 +4000,8 @@ func TestReconciler_FailedDeployment_TaintedNodes(t *testing.T) {
 	}
 
 	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, tainted, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+		d, allocs, tainted, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -3971,7 +4031,7 @@ func TestReconciler_CompleteDeployment(t *testing.T) {
 	job := mock.Job()
 	job.TaskGroups[0].Update = canaryUpdate
 
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusSuccessful
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: true,
@@ -3997,7 +4057,8 @@ func TestReconciler_CompleteDeployment(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -4022,7 +4083,7 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) {
 	job := mock.Job()
 	job.TaskGroups[0].Update = noCanaryUpdate
 
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		DesiredTotal: 10,
 		PlacedAllocs: 20,
@@ -4052,7 +4113,8 @@ func TestReconciler_MarkDeploymentComplete_FailedAllocations(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID,
+		job, d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	updates := []*structs.DeploymentStatusUpdate{
@@ -4087,7 +4149,7 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) {
 	job.TaskGroups[1].Name = "two"
 
 	// Create an existing failed deployment that has promoted one task group
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusFailed
 	s0 := &structs.DeploymentState{
 		Promoted: true,
@@ -4147,7 +4209,8 @@ func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) {
 	}
 
 	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -4177,7 +4240,7 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) {
 	job.TaskGroups[0].Update = noCanaryUpdate
 
 	// Create an existing failed deployment that has some placed allocs
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusFailed
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: true,
@@ -4216,10 +4279,11 @@ func TestReconciler_FailedDeployment_NewJob(t *testing.T) {
 	jobNew := job.Copy()
 	jobNew.Version += 100
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, jobNew, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, jobNew,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
-	dnew := structs.NewDeployment(jobNew)
+	dnew := structs.NewDeployment(jobNew, 50)
 	dnew.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		DesiredTotal: 10,
 	}
@@ -4245,7 +4309,7 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) {
 	job := mock.Job()
 	job.TaskGroups[0].Update = noCanaryUpdate
 
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted:     true,
 		DesiredTotal: 10,
@@ -4269,7 +4333,8 @@ func TestReconciler_MarkDeploymentComplete(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	updates := []*structs.DeploymentStatusUpdate{
@@ -4304,7 +4369,7 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) {
 	job.TaskGroups[0].Count = 30
 
 	// Create a deployment that is paused and has placed some canaries
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted:     false,
 		DesiredTotal: 30,
@@ -4338,7 +4403,8 @@ func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) {
 	}
 
 	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), mockUpdateFn, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -4373,10 +4439,11 @@ func TestReconciler_RollingUpgrade_MissingAllocs(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
+		nil, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		DesiredTotal: 10,
 	}
@@ -4425,7 +4492,8 @@ func TestReconciler_Batch_Rerun(t *testing.T) {
 	job2 := job.Copy()
 	job2.CreateIndex++
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job2.ID, job2, nil, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job2.ID, job2,
+		nil, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert the correct results
@@ -4454,7 +4522,7 @@ func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) {
 	tgName := job.TaskGroups[0].Name
 	now := time.Now()
 	// Create an existing failed deployment that has some placed allocs
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusFailed
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: true,
@@ -4486,7 +4554,8 @@ func TestReconciler_FailedDeployment_DontReschedule(t *testing.T) {
 		StartedAt:  now.Add(-1 * time.Hour),
 		FinishedAt: now.Add(-10 * time.Second)}}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert that no rescheduled placements were created
@@ -4511,7 +4580,7 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) {
 	now := time.Now()
 
 	// Mock deployment with failed allocs, but deployment watcher hasn't marked it as failed yet
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusRunning
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: false,
@@ -4541,7 +4610,8 @@ func TestReconciler_DeploymentWithFailedAllocs_DontReschedule(t *testing.T) {
 		allocs[i].DesiredTransition.Reschedule = helper.BoolToPtr(true)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert that no rescheduled placements were created
@@ -4584,7 +4654,7 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) {
 	jobv2.Version = 2
 	jobv2.TaskGroups[0].Meta = map[string]string{"version": "2"}
 
-	d := structs.NewDeployment(jobv2)
+	d := structs.NewDeployment(jobv2, 50)
 	state := &structs.DeploymentState{
 		Promoted:     true,
 		DesiredTotal: 3,
@@ -4626,7 +4696,8 @@ func TestReconciler_FailedDeployment_AutoRevert_CancelCanaries(t *testing.T) {
 		allocs = append(allocs, new)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, jobv2, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, jobv2,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	updates := []*structs.DeploymentStatusUpdate{
@@ -4663,7 +4734,7 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T) {
 	now := time.Now()
 
 	// Mock deployment with failed allocs, but deployment watcher hasn't marked it as failed yet
-	d := structs.NewDeployment(job)
+	d := structs.NewDeployment(job, 50)
 	d.Status = structs.DeploymentStatusSuccessful
 	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
 		Promoted: false,
@@ -4688,7 +4759,8 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T) {
 		allocs = append(allocs, alloc)
 	}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnDestructive, false, job.ID, job,
+		d, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Assert that rescheduled placements were created
@@ -4752,7 +4824,8 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) {
 	// Mark DesiredTransition ForceReschedule
 	allocs[0].DesiredTransition = structs.DesiredTransition{ForceReschedule: helper.BoolToPtr(true)}
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
+		nil, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Verify that no follow up evals were created
@@ -4834,7 +4907,8 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) {
 	// Mark one as desired state stop
 	allocs[4].DesiredStatus = structs.AllocDesiredStatusStop
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, job.ID, job,
+		nil, allocs, nil, "", 50)
 	r := reconciler.Compute()
 
 	// Verify that no follow up evals were created
@@ -4919,7 +4993,8 @@ func TestReconciler_RescheduleNot_Batch(t *testing.T) {
 	// Mark one as complete
 	allocs[5].ClientStatus = structs.AllocClientStatusComplete
 
-	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job, nil, allocs, nil, "")
+	reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, true, job.ID, job,
+		nil, allocs, nil, "", 50)
 	reconciler.now = now
 	r := reconciler.Compute()
 
@@ -111,6 +111,10 @@ The table below shows this endpoint's support for
   the register only occurs if the job is new. This paradigm allows check-and-set
   style job updating.
 
+- `EvalPriority` `(int: 0)` - Override the priority of the evaluations produced
+  as a result of this job registration. By default, this is set to the priority
+  of the job.
+
 - `JobModifyIndex` `(int: 0)` - Specifies the `JobModifyIndex` to enforce the
   current job is at.
 
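For programmatic callers, the Go API client exposes this payload field through `RegisterOptions`. A minimal sketch, not taken from this commit: it assumes an agent reachable at the default address, and `api.NewServiceJob` is used only to produce a skeletal job for illustration (a real job needs task groups before the servers will accept it):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Skeletal job with priority 50, purely for illustration.
	job := api.NewServiceJob("example", "example", "global", 50)

	// EvalPriority is optional: leaving it at zero keeps the old behaviour
	// of evaluations inheriting the job priority.
	opts := &api.RegisterOptions{EvalPriority: 80}

	resp, _, err := client.Jobs().RegisterOpts(job, opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("evaluation:", resp.EvalID)
}
```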
@@ -1558,6 +1562,10 @@ The table below shows this endpoint's support for
   the register only occurs if the job is new. This paradigm allows check-and-set
   style job updating.
 
+- `EvalPriority` `(int: 0)` - Override the priority of the evaluations produced
+  as a result of this job update. By default, this is set to the priority of
+  the job.
+
 - `JobModifyIndex` `(int: 0)` - Specifies the `JobModifyIndex` to enforce the
   current job is at.
 
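Updating an existing job uses the same request body as registering a new one, so the `RegisterOptions` sketch above should apply to updates unchanged; only the documentation location differs.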
@@ -2099,6 +2107,10 @@ The table below shows this endpoint's support for
 - `:job_id` `(string: <required>)` - Specifies the ID of the job (as specified in
   the job file during submission). This is specified as part of the path.
 
+- `eval_priority` `(int: 0)` - Override the priority of the evaluations produced
+  as a result of this job deregistration. By default, this is set to the priority
+  of the job.
+
 - `global` `(bool: false)` - Stop a multi-region job in all its regions. By default,
   job stop will stop only a single region at a time. Ignored for single-region jobs.
 
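On deletion the value travels as a query parameter rather than in a payload. With the Go client it is set through `DeregisterOptions`; a minimal sketch under the same assumptions as above, stopping a job whose ID is `example`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Purge false performs a normal stop; EvalPriority again falls back to
	// the job priority when left at zero.
	opts := &api.DeregisterOptions{Purge: false, EvalPriority: 80}

	evalID, _, err := client.Jobs().DeregisterOpts("example", opts, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("evaluation:", evalID)
}
```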
@@ -70,6 +70,9 @@ that volume.
   will be output, which can be used to examine the evaluation using the
   [eval status] command.
 
+- `-eval-priority`: Override the priority of the evaluations produced as a result
+  of this job submission. By default, this is set to the priority of the job.
+
 - `-hcl1`: If set, HCL1 parser is used for parsing the job spec.
 
 - `-hcl2-strict`: Whether an error should be produced from the HCL2 parser where
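In practice the flag reads as `nomad job run -eval-priority=90 example.nomad`, which can be useful when a one-off submission should be evaluated ahead of lower-priority work without permanently raising the job's own priority.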
@@ -40,6 +40,9 @@ When ACLs are enabled, this command requires a token with the `submit-job`,
   deregister command is submitted, a new evaluation ID is printed to the screen,
   which can be used to examine the evaluation using the [eval status] command.
 
+- `-eval-priority`: Override the priority of the evaluations produced as a result
+  of this job deregistration. By default, this is set to the priority of the job.
+
 - `-verbose`: Show full information.
 
 - `-yes`: Automatic yes to prompts.
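The stop-side counterpart follows the same shape, e.g. `nomad job stop -eval-priority=90 example`, mirroring the `eval_priority` query parameter documented for the HTTP delete endpoint above.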