node pools: add `node_pool` field to job spec (#17379)

This changeset only adds the `node_pool` field to the jobspec and ensures that
it is correctly detected as a change when jobs are diffed. Since the rest of the
implementation has not landed yet, the field is ignored.
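
For illustration, a minimal sketch of how the field is set through the Go API
client (assumes the `github.com/hashicorp/nomad/api` package; the `ptr` helper
is local to the example, since the package's `pointerOf` is unexported):

package main

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
)

// ptr is a local generic helper; the api package's pointerOf is unexported.
func ptr[T any](v T) *T { return &v }

func main() {
    job := &api.Job{
        ID:       ptr("example"),
        NodePool: ptr("prod"), // the field added by this changeset
    }

    // Canonicalize fills defaults for unset fields; an unset NodePool
    // becomes NodePoolDefault ("default").
    job.Canonicalize()
    fmt.Println(*job.NodePool) // prod
}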
Tim Gross 2023-06-01 16:08:55 -04:00 committed by GitHub
parent c61e75f302
commit 4f14fa0518
12 changed files with 124 additions and 2 deletions


@@ -38,6 +38,9 @@ const (
    // DefaultNamespace is the default namespace.
    DefaultNamespace = "default"

    // NodePoolDefault is the default node pool.
    NodePoolDefault = "default"

    // For Job configuration, GlobalRegion is a sentinel region value
    // that users may specify to indicate the job should be run on
    // the region of the node that the job was submitted to.
@@ -793,9 +796,11 @@ func (m *Multiregion) Copy() *Multiregion {
        copyRegion.Name = region.Name
        copyRegion.Count = pointerOf(*region.Count)
        copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
        copyRegion.NodePool = region.NodePool
        for k, v := range region.Meta {
            copyRegion.Meta[k] = v
        }
        copy.Regions = append(copy.Regions, copyRegion)
    }
    return copy
@@ -810,6 +815,7 @@ type MultiregionRegion struct {
    Name        string            `hcl:",label"`
    Count       *int              `hcl:"count,optional"`
    Datacenters []string          `hcl:"datacenters,optional"`
    NodePool    string            `hcl:"node_pool,optional"`
    Meta        map[string]string `hcl:"meta,block"`
}
@@ -943,6 +949,7 @@ type Job struct {
    Priority    *int          `hcl:"priority,optional"`
    AllAtOnce   *bool         `mapstructure:"all_at_once" hcl:"all_at_once,optional"`
    Datacenters []string      `hcl:"datacenters,optional"`
    NodePool    *string       `hcl:"node_pool,optional"`
    Constraints []*Constraint `hcl:"constraint,block"`
    Affinities  []*Affinity   `hcl:"affinity,block"`
    TaskGroups  []*TaskGroup  `hcl:"group,block"`
@@ -1014,8 +1021,8 @@ func (j *Job) Canonicalize() {
    if j.Region == nil {
        j.Region = pointerOf(GlobalRegion)
    }
    if j.Namespace == nil {
        j.Namespace = pointerOf("default")
    }
    if j.NodePool == nil {
        j.NodePool = pointerOf(NodePoolDefault)
    }
    if j.Type == nil {
        j.Type = pointerOf("service")


@@ -284,6 +284,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Type:            pointerOf("service"),
        ParentID:        pointerOf(""),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),
@@ -379,6 +380,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Type:            pointerOf("batch"),
        ParentID:        pointerOf(""),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),
@@ -457,6 +459,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Type:            pointerOf("service"),
        ParentID:        pointerOf("lol"),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),
@@ -626,6 +629,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Name:      pointerOf("example_template"),
        ParentID:  pointerOf(""),
        Priority:  pointerOf(JobDefaultPriority),
        NodePool:  pointerOf(NodePoolDefault),
        Region:    pointerOf("global"),
        Type:      pointerOf("service"),
        AllAtOnce: pointerOf(false),
@@ -796,6 +800,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Region:          pointerOf("global"),
        Type:            pointerOf("service"),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),
@@ -887,6 +892,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Type:            pointerOf("service"),
        ParentID:        pointerOf("lol"),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),
@@ -1062,6 +1068,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Region:      pointerOf("global"),
        Type:        pointerOf("service"),
        ParentID:    pointerOf("lol"),
        NodePool:    pointerOf(NodePoolDefault),
        Priority:    pointerOf(JobDefaultPriority),
        AllAtOnce:   pointerOf(false),
        ConsulToken: pointerOf(""),
@@ -1234,6 +1241,7 @@ func TestJobs_Canonicalize(t *testing.T) {
        Type:            pointerOf("service"),
        ParentID:        pointerOf("lol"),
        Priority:        pointerOf(JobDefaultPriority),
        NodePool:        pointerOf(NodePoolDefault),
        AllAtOnce:       pointerOf(false),
        ConsulToken:     pointerOf(""),
        ConsulNamespace: pointerOf(""),


@@ -965,6 +965,7 @@ func ApiJobToStructJob(job *api.Job) *structs.Job {
        Priority:    *job.Priority,
        AllAtOnce:   *job.AllAtOnce,
        Datacenters: job.Datacenters,
        NodePool:    *job.NodePool,
        Payload:     job.Payload,
        Meta:        job.Meta,
        ConsulToken: *job.ConsulToken,
@@ -1028,6 +1029,7 @@ func ApiJobToStructJob(job *api.Job) *structs.Job {
        r.Name = region.Name
        r.Count = *region.Count
        r.Datacenters = region.Datacenters
        r.NodePool = region.NodePool
        r.Meta = region.Meta
        j.Multiregion.Regions = append(j.Multiregion.Regions, r)
    }
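
Note that the conversion dereferences `*job.NodePool` without a nil check,
which is safe because the API job is canonicalized before it reaches this
function. A self-contained sketch of that contract (`apiJob`, `structsJob`,
and the helpers are illustrative stand-ins, not Nomad's types):

package main

import "fmt"

// apiJob mirrors the client-side shape: optional fields are pointers.
type apiJob struct{ NodePool *string }

// structsJob mirrors the server-side shape: plain values.
type structsJob struct{ NodePool string }

func canonicalize(j *apiJob) {
    if j.NodePool == nil {
        def := "default"
        j.NodePool = &def
    }
}

func convert(j *apiJob) *structsJob {
    // Dereferencing is safe only because canonicalize has already run.
    return &structsJob{NodePool: *j.NodePool}
}

func main() {
    j := &apiJob{}
    canonicalize(j)
    fmt.Println(convert(j).NodePool) // default
}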


@@ -2844,6 +2844,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
        Priority:    50,
        AllAtOnce:   true,
        Datacenters: []string{"dc1", "dc2"},
        NodePool:    "default",
        Constraints: []*structs.Constraint{
            {
                LTarget: "a",
@@ -3380,6 +3381,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
        Priority:    50,
        AllAtOnce:   true,
        Datacenters: []string{"dc1", "dc2"},
        NodePool:    "default",
        Constraints: []*structs.Constraint{
            {
                LTarget: "a",


@@ -176,6 +176,7 @@ func (c *JobStatusCommand) Run(args []string) int {
        fmt.Sprintf("Priority|%d", *job.Priority),
        fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")),
        fmt.Sprintf("Namespace|%s", *job.Namespace),
        fmt.Sprintf("Node Pool|%s", *job.NodePool),
        fmt.Sprintf("Status|%s", getStatusString(*job.Status, job.Stop)),
        fmt.Sprintf("Periodic|%v", periodic),
        fmt.Sprintf("Parameterized|%v", parameterized),


@@ -742,6 +742,7 @@ func Test_jobCanonicalizer_Mutate(t *testing.T) {
            expectedOutputJob: &structs.Job{
                Namespace:   "default",
                Datacenters: []string{"*"},
                NodePool:    structs.NodePoolDefault,
                Priority:    123,
            },
        },
@@ -755,6 +756,7 @@ func Test_jobCanonicalizer_Mutate(t *testing.T) {
            expectedOutputJob: &structs.Job{
                Namespace:   "default",
                Datacenters: []string{"*"},
                NodePool:    structs.NodePoolDefault,
                Priority:    serverJobDefaultPriority,
            },
        },
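
These cases assert that the server-side canonicalizer mutator fills in the
node pool alongside the other defaults. A standalone sketch of just that
defaulting step (the `job` type and `mutateJob` are hypothetical stand-ins
for the behavior asserted above):

package main

import "fmt"

const nodePoolDefault = "default"

// job is a hypothetical stand-in for the fields the test cases check.
type job struct {
    Namespace   string
    Datacenters []string
    NodePool    string
    Priority    int
}

// mutateJob applies the node pool default the canonicalizer is expected
// to fill in on job registration.
func mutateJob(j *job) *job {
    out := *j
    if out.NodePool == "" {
        out.NodePool = nodePoolDefault
    }
    return &out
}

func main() {
    in := &job{Namespace: "default", Datacenters: []string{"*"}, Priority: 123}
    fmt.Println(mutateJob(in).NodePool) // default
}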


@@ -441,6 +441,76 @@ func TestJobDiff(t *testing.T) {
                },
            },
        },
        {
            // NodePool added
            Old: &Job{},
            New: &Job{
                NodePool: "default",
            },
            Expected: &JobDiff{
                Type: DiffTypeEdited,
                Fields: []*FieldDiff{
                    {
                        Type: DiffTypeAdded,
                        Name: "NodePool",
                        Old:  "",
                        New:  "default",
                    },
                },
            },
        },
        {
            // NodePool removed
            Old: &Job{
                NodePool: "default",
            },
            New: &Job{},
            Expected: &JobDiff{
                Type: DiffTypeEdited,
                Fields: []*FieldDiff{
                    {
                        Type: DiffTypeDeleted,
                        Name: "NodePool",
                        Old:  "default",
                        New:  "",
                    },
                },
            },
        },
        {
            // NodePool changed
            Old: &Job{
                NodePool: "default",
            },
            New: &Job{
                NodePool: "foo",
            },
            Expected: &JobDiff{
                Type: DiffTypeEdited,
                Fields: []*FieldDiff{
                    {
                        Type: DiffTypeEdited,
                        Name: "NodePool",
                        Old:  "default",
                        New:  "foo",
                    },
                },
            },
        },
        {
            // NodePool unchanged
            Old: &Job{
                NodePool: "foo",
            },
            New: &Job{
                NodePool: "foo",
            },
            Expected: &JobDiff{
                Type: DiffTypeNone,
            },
        },
        {
            // Periodic added
            Old: &Job{},
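
The four cases above cover the full added/removed/edited/unchanged matrix for
the new field. A self-contained sketch of how a scalar string field maps onto
those diff types (illustrative only, not the structs package's diff
implementation):

package main

import "fmt"

// fieldDiffType classifies a change to a scalar string field the same way
// the test cases above expect: Added, Deleted, Edited, or None.
func fieldDiffType(oldVal, newVal string) string {
    switch {
    case oldVal == newVal:
        return "None"
    case oldVal == "":
        return "Added"
    case newVal == "":
        return "Deleted"
    default:
        return "Edited"
    }
}

func main() {
    fmt.Println(fieldDiffType("", "default"))    // Added
    fmt.Println(fieldDiffType("default", ""))    // Deleted
    fmt.Println(fieldDiffType("default", "foo")) // Edited
    fmt.Println(fieldDiffType("foo", "foo"))     // None
}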


@@ -4385,6 +4385,9 @@ type Job struct {
    // Datacenters contains all the datacenters this job is allowed to span
    Datacenters []string

    // NodePool specifies the node pool this job is allowed to run on
    NodePool string

    // Constraints can be specified at a job level and apply to
    // all the task groups and tasks.
    Constraints []*Constraint
@@ -4535,6 +4538,10 @@ func (j *Job) Canonicalize() {
        j.Datacenters = []string{"*"}
    }

    if j.NodePool == "" {
        j.NodePool = NodePoolDefault
    }

    for _, tg := range j.TaskGroups {
        tg.Canonicalize(j)
    }
@@ -4617,6 +4624,10 @@ func (j *Job) Validate() error {
            }
        }
    }

    if j.NodePool == "" {
        mErr.Errors = append(mErr.Errors, errors.New("Job must be in a node_pool"))
    }
    if len(j.TaskGroups) == 0 {
        mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
    }
@@ -4827,6 +4838,7 @@ func (j *Job) Stub(summary *JobSummary, fields *JobStubFields) *JobListStub {
        ParentID:    j.ParentID,
        Name:        j.Name,
        Datacenters: j.Datacenters,
        NodePool:    j.NodePool,
        Multiregion: j.Multiregion,
        Type:        j.Type,
        Priority:    j.Priority,
@@ -5019,6 +5031,7 @@ type JobListStub struct {
    Name        string
    Namespace   string `json:",omitempty"`
    Datacenters []string
    NodePool    string
    Multiregion *Multiregion
    Type        string
    Priority    int
@@ -5279,6 +5292,7 @@ func (m *Multiregion) Copy() *Multiregion {
            Name:        region.Name,
            Count:       region.Count,
            Datacenters: []string{},
            NodePool:    region.NodePool,
            Meta:        map[string]string{},
        }
        copyRegion.Datacenters = append(copyRegion.Datacenters, region.Datacenters...)
@@ -5299,6 +5313,7 @@ type MultiregionRegion struct {
    Name        string
    Count       int
    Datacenters []string
    NodePool    string
    Meta        map[string]string
}
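
Taken together, the `Canonicalize` and `Validate` hunks mean an empty node
pool is defaulted before validation runs, so the new validation error should
only fire when canonicalization is skipped. A minimal standalone sketch of
that ordering (local `job` type, not Nomad's):

package main

import (
    "errors"
    "fmt"
)

const nodePoolDefault = "default"

type job struct{ NodePool string }

// canonicalize mirrors the defaulting added to structs.Job.Canonicalize.
func (j *job) canonicalize() {
    if j.NodePool == "" {
        j.NodePool = nodePoolDefault
    }
}

// validate mirrors the check added to structs.Job.Validate.
func (j *job) validate() error {
    if j.NodePool == "" {
        return errors.New("Job must be in a node_pool")
    }
    return nil
}

func main() {
    j := &job{}
    j.canonicalize()
    fmt.Println(j.NodePool, j.validate()) // default <nil>
}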


@@ -461,6 +461,7 @@ func testJob() *Job {
        Priority:    JobDefaultPriority,
        AllAtOnce:   false,
        Datacenters: []string{"*"},
        NodePool:    NodePoolDefault,
        Constraints: []*Constraint{
            {
                LTarget: "$attr.kernel.name",


@@ -152,6 +152,7 @@ The table below shows this endpoint's support for
  "Job": {
    "Datacenters": ["dc1"],
    "ID": "cache",
    "NodePool": "prod",
    "TaskGroups": [
      {
        "Name": "cache",
@@ -288,6 +289,7 @@ $ curl \
    "ModifyIndex": 0,
    "Name": "my-job",
    "Namespace": "default",
    "NodePool": "prod",
    "ParameterizedJob": null,
    "ParentID": "",
    "Payload": null,
@@ -357,6 +359,7 @@ $ curl \
    "Priority": 50,
    "AllAtOnce": false,
    "Datacenters": ["dc1"],
    "NodePool": "prod",
    "Constraints": [
      {
        "LTarget": "${attr.kernel.name}",
@@ -675,6 +678,7 @@ $ curl \
    "Name": "example",
    "Namespace": "default",
    "NomadTokenID": "",
    "NodePool": "prod",
    "ParameterizedJob": null,
    "ParentID": "",
    "Payload": null,
@@ -888,6 +892,7 @@ $ curl \
    "Multiregion": null,
    "Name": "example",
    "Namespace": "default",
    "NodePool": "prod",
    "NomadTokenID": "",
    "ParameterizedJob": null,
    "ParentID": "",
@@ -1057,6 +1062,7 @@ $ curl \
    "Multiregion": null,
    "Name": "example",
    "Namespace": "default",
    "NodePool": "prod",
    "NomadTokenID": "",
    "ParameterizedJob": null,
    "ParentID": "",


@@ -62,6 +62,7 @@ $ nomad job run -output example.nomad.hcl
    "Datacenters": [
      "dc1"
    ],
    "NodePool": "prod",
    "Constraints": null,
    "Affinities": null,
    "TaskGroups": [
@@ -256,6 +257,8 @@ The `Job` object supports the following keys:
- `Datacenters` - A list of datacenters in the region which are eligible
  for task placement. This must be provided, and does not have a default.

- `NodePool` - The node pool in which the job can be placed. Defaults to
  `"default"`.

- `TaskGroups` - A list to define additional task groups. See the task group
  reference for more details.


@@ -24,6 +24,8 @@ job "docs" {
  datacenters = ["us-east-1"]
  node_pool   = "prod"

  group "example" {
    # ...
  }
@@ -81,6 +83,9 @@
  through the use of `*` for multi-character matching. The default value is
  `["*"]`, which allows the job to be placed in any available datacenter.

- `node_pool` `(string: <optional>)` - Specifies the node pool to place the
  job in. The node pool must exist when the job is registered. Defaults to
  `"default"`.

- `group` <code>([Group][group]: &lt;required&gt;)</code> - Specifies the start of a
  group of tasks. This can be provided multiple times to define additional
  groups. Group names must be unique within the job file.