Merge pull request #8360 from hashicorp/b-8355-better-scaling-validation

better error handling around Scaling->Max
This commit is contained in:
Chris Baker 2020-07-06 11:32:02 -05:00 committed by GitHub
commit 5b96c3d50e
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
13 changed files with 51 additions and 20 deletions

View file

@ -22,6 +22,7 @@ IMPROVEMENTS:
* core: Support for persisting previous task group counts when updating a job [[GH-8168](https://github.com/hashicorp/nomad/issues/8168)]
* core: Block Job.Scale actions when the job is under active deployment [[GH-8187](https://github.com/hashicorp/nomad/issues/8187)]
* api: Better error messages around Scaling->Max [[GH-8360](https://github.com/hashicorp/nomad/issues/8360)]
* api: Persist previous count with scaling events [[GH-8167](https://github.com/hashicorp/nomad/issues/8167)]
* api: Support querying for jobs and allocations across all namespaces [[GH-8192](https://github.com/hashicorp/nomad/issues/8192)]
* api: New `/agent/host` endpoint returns diagnostic information about the host [[GH-8325](https://github.com/hashicorp/nomad/pull/8325)]

View file

@ -56,7 +56,7 @@ type ScalingPolicy struct {
Namespace string
Target map[string]string
Min *int64
Max int64
Max *int64
Policy map[string]interface{}
Enabled *bool
CreateIndex uint64

View file

@ -23,7 +23,7 @@ func TestScalingPolicies_ListPolicies(t *testing.T) {
// Register a job with a scaling policy
job := testJob()
job.TaskGroups[0].Scaling = &ScalingPolicy{
Max: 100,
Max: int64ToPtr(100),
}
_, _, err = jobs.Register(job, nil)
require.NoError(err)
@ -75,7 +75,7 @@ func TestScalingPolicies_GetPolicy(t *testing.T) {
policy := &ScalingPolicy{
Enabled: boolToPtr(true),
Min: int64ToPtr(1),
Max: 1,
Max: int64ToPtr(1),
Policy: map[string]interface{}{
"key": "value",
},

View file

@ -444,7 +444,7 @@ func TestTaskGroup_Canonicalize_Scaling(t *testing.T) {
Count: nil,
Scaling: &ScalingPolicy{
Min: nil,
Max: 10,
Max: int64ToPtr(10),
Policy: nil,
Enabled: nil,
CreateIndex: 0,

View file

@ -53,7 +53,7 @@ func testJobWithScalingPolicy() *Job {
job.TaskGroups[0].Scaling = &ScalingPolicy{
Policy: map[string]interface{}{},
Min: int64ToPtr(1),
Max: 1,
Max: int64ToPtr(1),
Enabled: boolToPtr(true),
}
return job

View file

@ -78,10 +78,15 @@ func (s *HTTPServer) scalingPolicyQuery(resp http.ResponseWriter, req *http.Requ
func ApiScalingPolicyToStructs(count int, ap *api.ScalingPolicy) *structs.ScalingPolicy {
p := structs.ScalingPolicy{
Enabled: *ap.Enabled,
Max: ap.Max,
Policy: ap.Policy,
Target: map[string]string{},
}
if ap.Max != nil {
p.Max = *ap.Max
} else {
// catch this in Validate
p.Max = -1
}
if ap.Min != nil {
p.Min = *ap.Min
} else {

View file

@ -364,6 +364,9 @@ func parseScalingPolicy(out **api.ScalingPolicy, list *ast.ObjectList) error {
if err := dec.Decode(m); err != nil {
return err
}
if result.Max == nil {
return fmt.Errorf("missing 'max'")
}
// If we have policy, then parse that
if o := listVal.Filter("policy"); len(o.Items) > 0 {

View file

@ -1320,7 +1320,7 @@ func TestParse(t *testing.T) {
Name: helper.StringToPtr("group"),
Scaling: &api.ScalingPolicy{
Min: helper.Int64ToPtr(5),
Max: 100,
Max: helper.Int64ToPtr(100),
Policy: map[string]interface{}{
"foo": "bar",
"b": true,
@ -1345,7 +1345,7 @@ func TestParse(t *testing.T) {
Name: helper.StringToPtr("group"),
Scaling: &api.ScalingPolicy{
Min: nil,
Max: 0,
Max: helper.Int64ToPtr(10),
Policy: nil,
Enabled: nil,
},
@ -1355,6 +1355,12 @@ func TestParse(t *testing.T) {
false,
},
{
"tg-scaling-policy-missing-max.hcl",
nil,
true,
},
{
"tg-scaling-policy-multi-policy.hcl",
nil,

View file

@ -1,5 +1,7 @@
job "elastic" {
group "group" {
scaling {}
scaling {
max = 10
}
}
}

View file

@ -0,0 +1,7 @@
job "elastic" {
group "group" {
scaling {
// required: max = ...
}
}
}

View file

@ -5948,21 +5948,26 @@ func (tg *TaskGroup) validateScalingPolicy(j *Job) error {
var mErr multierror.Error
if tg.Scaling.Min > tg.Scaling.Max {
// was invalid or not specified; don't bother testing anything else involving max
if tg.Scaling.Max < 0 {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("Scaling policy invalid: maximum count must not be less than minimum count"))
fmt.Errorf("Scaling policy invalid: maximum count must be specified and non-negative"))
} else {
if tg.Scaling.Max < tg.Scaling.Min {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("Scaling policy invalid: maximum count must not be less than minimum count"))
}
if tg.Scaling.Max < int64(tg.Count) {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
}
}
if int64(tg.Count) < tg.Scaling.Min && !(j.IsMultiregion() && tg.Count == 0) {
if int64(tg.Count) < tg.Scaling.Min && !(j.IsMultiregion() && tg.Count == 0 && j.Region == "global") {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("Scaling policy invalid: task group count must not be less than minimum count in scaling policy"))
}
if tg.Scaling.Max < int64(tg.Count) {
mErr.Errors = append(mErr.Errors,
fmt.Errorf("Scaling policy invalid: task group count must not be greater than maximum count in scaling policy"))
}
return mErr.ErrorOrNil()
}

View file

@ -56,7 +56,7 @@ type ScalingPolicy struct {
Namespace string
Target map[string]string
Min *int64
Max int64
Max *int64
Policy map[string]interface{}
Enabled *bool
CreateIndex uint64

View file

@ -36,8 +36,10 @@ job "docs" {
node attribute or metadata. See the
[Nomad spread reference](/docs/job-specification/spread) for more details.
- `count` `(int: 1)` - Specifies the number of the task groups that should
be running under this group. This value must be non-negative.
- `count` `(int)` - Specifies the number of instances that should be running
under this group. This value must be non-negative. This defaults to the
`min` value specified in the [`scaling`](/docs/job-specification/scaling)
block, if present; otherwise, this defaults to `1`.
- `ephemeral_disk` <code>([EphemeralDisk][]: nil)</code> - Specifies the
ephemeral disk requirements of the group. Ephemeral disks can be marked as