node pools: namespace integration (#17562)

Add structs and fields to support the Nomad Pools Governance Enterprise
feature of controlling node pool access via namespaces.

Nomad Enterprise allows users to specify a default node pool to be used
by jobs that don't specify one. In order to accomplish this, it's
necessary to distinguish between a job that explicitly uses the
`default` node pool and one that did not specify any.

If the `default` node pool is set during job canonicalization it's
impossible to do this, so this commit allows a job to have an empty node
pool value during registration but sets it to `default` in the admission
controller mutator.

In order to guarantee state consistency the state store validates that
the job node pool is set and exists before inserting it.
This commit is contained in:
Luiz Aoqui 2023-06-16 16:30:22 -04:00 committed by GitHub
parent 3da948d0c8
commit d5aa72190f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 750 additions and 59 deletions

View File

@ -1019,7 +1019,7 @@ func (j *Job) Canonicalize() {
j.Region = pointerOf(GlobalRegion)
}
if j.NodePool == nil {
j.NodePool = pointerOf(NodePoolDefault)
j.NodePool = pointerOf("")
}
if j.Type == nil {
j.Type = pointerOf("service")

View File

@ -284,7 +284,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: pointerOf("service"),
ParentID: pointerOf(""),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),
@ -380,7 +380,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: pointerOf("batch"),
ParentID: pointerOf(""),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),
@ -459,7 +459,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: pointerOf("service"),
ParentID: pointerOf("lol"),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),
@ -629,7 +629,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Name: pointerOf("example_template"),
ParentID: pointerOf(""),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
Region: pointerOf("global"),
Type: pointerOf("service"),
AllAtOnce: pointerOf(false),
@ -800,7 +800,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Region: pointerOf("global"),
Type: pointerOf("service"),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),
@ -892,7 +892,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: pointerOf("service"),
ParentID: pointerOf("lol"),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),
@ -1068,7 +1068,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Region: pointerOf("global"),
Type: pointerOf("service"),
ParentID: pointerOf("lol"),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
Priority: pointerOf(JobDefaultPriority),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
@ -1241,7 +1241,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Type: pointerOf("service"),
ParentID: pointerOf("lol"),
Priority: pointerOf(JobDefaultPriority),
NodePool: pointerOf(NodePoolDefault),
NodePool: pointerOf(""),
AllAtOnce: pointerOf(false),
ConsulToken: pointerOf(""),
ConsulNamespace: pointerOf(""),

View File

@ -74,16 +74,27 @@ type Namespace struct {
Description string
Quota string
Capabilities *NamespaceCapabilities `hcl:"capabilities,block"`
NodePoolConfiguration *NamespaceNodePoolConfiguration `hcl:"node_pool_config,block"`
Meta map[string]string
CreateIndex uint64
ModifyIndex uint64
}
// NamespaceCapabilities represents a set of capabilities allowed for this
// namespace, to be checked at job submission time.
type NamespaceCapabilities struct {
EnabledTaskDrivers []string `hcl:"enabled_task_drivers"`
DisabledTaskDrivers []string `hcl:"disabled_task_drivers"`
}
// NamespaceNodePoolConfiguration stores configuration about node pools for a
// namespace. It is decoded from the `node_pool_config` block of a namespace
// specification and mirrors the server-side structure of the same name.
type NamespaceNodePoolConfiguration struct {
	// Default is the node pool used by jobs in this namespace that do not
	// specify a node pool of their own.
	Default string

	// Allowed restricts jobs in this namespace to node pools matching these
	// patterns (`*` multi-character globbing). Mutually exclusive with Denied.
	Allowed []string

	// Denied blocks jobs in this namespace from node pools matching these
	// patterns (`*` multi-character globbing). Mutually exclusive with Allowed.
	Denied []string
}
// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We
// reverse the test so that we get the highest index first.
type NamespaceIndexSort []*Namespace

View File

@ -2844,7 +2844,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Priority: 50,
AllAtOnce: true,
Datacenters: []string{"dc1", "dc2"},
NodePool: "default",
NodePool: "",
Constraints: []*structs.Constraint{
{
LTarget: "a",
@ -3261,6 +3261,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
Priority: pointer.Of(50),
AllAtOnce: pointer.Of(true),
Datacenters: []string{"dc1", "dc2"},
NodePool: pointer.Of("default"),
Constraints: []*api.Constraint{
{
LTarget: "a",

View File

@ -226,6 +226,7 @@ func parseNamespaceSpecImpl(result *api.Namespace, list *ast.ObjectList) error {
delete(m, "capabilities")
delete(m, "meta")
delete(m, "node_pool_config")
// Decode the rest
if err := mapstructure.WeakDecode(m, result); err != nil {
@ -248,6 +249,22 @@ func parseNamespaceSpecImpl(result *api.Namespace, list *ast.ObjectList) error {
}
}
npObj := list.Filter("node_pool_config")
if len(npObj.Items) > 0 {
for _, o := range npObj.Elem().Items {
ot, ok := o.Val.(*ast.ObjectType)
if !ok {
break
}
var npConfig *api.NamespaceNodePoolConfiguration
if err := hcl.DecodeObject(&npConfig, ot.List); err != nil {
return err
}
result.NodePoolConfiguration = npConfig
break
}
}
if metaO := list.Filter("meta"); len(metaO.Items) > 0 {
for _, o := range metaO.Elem().Items {
var m map[string]interface{}

View File

@ -7,8 +7,10 @@ import (
"strings"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/ci"
"github.com/mitchellh/cli"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/assert"
)
@ -60,3 +62,71 @@ func TestNamespaceApplyCommand_Good(t *testing.T) {
assert.Nil(t, err)
assert.Len(t, namespaces, 2)
}
// TestNamespaceApplyCommand_parseNamespaceSpec verifies that HCL namespace
// specifications parse into the expected api.Namespace values, including the
// optional capabilities, node_pool_config, and meta blocks, plus minimal and
// empty inputs.
//
// Note: the function name previously misspelled "Namespace" as "Namesapce";
// fixed here. Test functions are only invoked by the test framework, so the
// rename is safe.
func TestNamespaceApplyCommand_parseNamespaceSpec(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		name     string
		input    string
		expected *api.Namespace
	}{
		{
			name: "valid namespace",
			input: `
name = "test-namespace"
description = "Test namespace"
quota = "test"

capabilities {
  enabled_task_drivers = ["exec", "docker"]
  disabled_task_drivers = ["raw_exec"]
}

node_pool_config {
  default = "dev"
  allowed = ["prod*"]
}

meta {
  dept = "eng"
}`,
			expected: &api.Namespace{
				Name:        "test-namespace",
				Description: "Test namespace",
				Quota:       "test",
				Capabilities: &api.NamespaceCapabilities{
					EnabledTaskDrivers:  []string{"exec", "docker"},
					DisabledTaskDrivers: []string{"raw_exec"},
				},
				NodePoolConfiguration: &api.NamespaceNodePoolConfiguration{
					Default: "dev",
					Allowed: []string{"prod*"},
				},
				Meta: map[string]string{
					"dept": "eng",
				},
			},
		},
		{
			name:  "minimal",
			input: `name = "test-small"`,
			expected: &api.Namespace{
				Name: "test-small",
			},
		},
		{
			name:     "empty",
			input:    "",
			expected: &api.Namespace{},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got, err := parseNamespaceSpec([]byte(tc.input))
			must.NoError(t, err)
			must.Eq(t, tc.expected, got)
		})
	}
}

View File

@ -149,6 +149,21 @@ func (c *NamespaceStatusCommand) Run(args []string) int {
}
}
if ns.NodePoolConfiguration != nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Node Pool Configuration[reset]"))
npConfig := ns.NodePoolConfiguration
npConfigOut := []string{
fmt.Sprintf("Default|%s", npConfig.Default),
}
if len(npConfig.Allowed) > 0 {
npConfigOut = append(npConfigOut, fmt.Sprintf("Allowed|%s", strings.Join(npConfig.Allowed, ", ")))
}
if len(npConfig.Denied) > 0 {
npConfigOut = append(npConfigOut, fmt.Sprintf("Denied|%s", strings.Join(npConfig.Denied, ", ")))
}
c.Ui.Output(formatKV(npConfigOut))
}
return 0
}

View File

@ -24,5 +24,9 @@ func (c jobNodePoolMutatingHook) Name() string {
}
// Mutate ensures every registered job carries a node pool, falling back to
// the default pool when the job specification left the field empty. Keeping
// the field empty until admission lets upstream logic distinguish a job that
// explicitly requested the "default" pool from one that requested none.
func (c jobNodePoolMutatingHook) Mutate(job *structs.Job) (*structs.Job, []error, error) {
	if job.NodePool != "" {
		// The job explicitly selected a pool; leave it untouched.
		return job, nil, nil
	}
	job.NodePool = structs.NodePoolDefault
	return job, nil, nil
}

View File

@ -742,7 +742,6 @@ func Test_jobCanonicalizer_Mutate(t *testing.T) {
expectedOutputJob: &structs.Job{
Namespace: "default",
Datacenters: []string{"*"},
NodePool: structs.NodePoolDefault,
Priority: 123,
},
},
@ -756,7 +755,6 @@ func Test_jobCanonicalizer_Mutate(t *testing.T) {
expectedOutputJob: &structs.Job{
Namespace: "default",
Datacenters: []string{"*"},
NodePool: structs.NodePoolDefault,
Priority: serverJobDefaultPriority,
},
},

View File

@ -18,6 +18,7 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/shoenig/test/must"
"github.com/stretchr/testify/require"
)
@ -335,3 +336,89 @@ func TestJobEndpoint_Register_Connect_AllowUnauthenticatedFalse_oss(t *testing.T
})
})
}
// TestJobEndpoint_Register_NodePool verifies node pool resolution during job
// registration: a job that leaves the pool empty is persisted with the
// default pool, while a job that names an existing pool keeps it.
func TestJobEndpoint_Register_NodePool(t *testing.T) {
	ci.Parallel(t)

	s, cleanupS := TestServer(t, func(c *Config) {
		// Disable schedulers so Register only writes state and does not kick
		// off evaluations/placements the test doesn't care about.
		c.NumSchedulers = 0
	})
	defer cleanupS()
	codec := rpcClient(t, s)
	testutil.WaitForLeader(t, s.RPC)

	// Create test namespace.
	ns := mock.Namespace()
	nsReq := &structs.NamespaceUpsertRequest{
		Namespaces:   []*structs.Namespace{ns},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var nsResp structs.GenericResponse
	err := msgpackrpc.CallWithCodec(codec, "Namespace.UpsertNamespaces", nsReq, &nsResp)
	must.NoError(t, err)

	// Create test node pool.
	pool := mock.NodePool()
	poolReq := &structs.NodePoolUpsertRequest{
		NodePools:    []*structs.NodePool{pool},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var poolResp structs.GenericResponse
	err = msgpackrpc.CallWithCodec(codec, "NodePool.UpsertNodePools", poolReq, &poolResp)
	must.NoError(t, err)

	testCases := []struct {
		name         string
		namespace    string
		nodePool     string
		expectedPool string
		expectedErr  string
	}{
		{
			name:         "job in default namespace uses default node pool",
			namespace:    structs.DefaultNamespace,
			nodePool:     "",
			expectedPool: structs.NodePoolDefault,
		},
		{
			name:         "job without node pool uses default node pool",
			namespace:    ns.Name,
			nodePool:     "",
			expectedPool: structs.NodePoolDefault,
		},
		{
			name:         "job can set node pool",
			namespace:    ns.Name,
			nodePool:     pool.Name,
			expectedPool: pool.Name,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			job := mock.Job()
			job.Namespace = tc.namespace
			job.NodePool = tc.nodePool

			req := &structs.JobRegisterRequest{
				Job: job,
				WriteRequest: structs.WriteRequest{
					Region:    "global",
					Namespace: job.Namespace,
				},
			}
			var resp structs.JobRegisterResponse
			err = msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp)
			if tc.expectedErr != "" {
				must.ErrorContains(t, err, tc.expectedErr)
			} else {
				must.NoError(t, err)

				// Read the job back from state to confirm which pool was
				// actually persisted, not just what the RPC accepted.
				got, err := s.State().JobByID(nil, job.Namespace, job.ID)
				must.NoError(t, err)
				must.Eq(t, tc.expectedPool, got.NodePool)
			}
		})
	}
}

View File

@ -17,6 +17,7 @@ func Job() *structs.Job {
ID: fmt.Sprintf("mock-service-%s", uuid.Generate()),
Name: "my-job",
Namespace: structs.DefaultNamespace,
NodePool: structs.NodePoolDefault,
Type: structs.JobTypeService,
Priority: structs.JobDefaultPriority,
AllAtOnce: false,
@ -258,6 +259,7 @@ func SystemBatchJob() *structs.Job {
ID: fmt.Sprintf("mock-sysbatch-%s", uuid.Short()),
Name: "my-sysbatch",
Namespace: structs.DefaultNamespace,
NodePool: structs.NodePoolDefault,
Type: structs.JobTypeSysBatch,
Priority: 10,
Datacenters: []string{"dc1"},
@ -326,6 +328,7 @@ func BatchJob() *structs.Job {
ID: fmt.Sprintf("mock-batch-%s", uuid.Generate()),
Name: "batch-job",
Namespace: structs.DefaultNamespace,
NodePool: structs.NodePoolDefault,
Type: structs.JobTypeBatch,
Priority: structs.JobDefaultPriority,
AllAtOnce: false,
@ -390,6 +393,7 @@ func SystemJob() *structs.Job {
job := &structs.Job{
Region: "global",
Namespace: structs.DefaultNamespace,
NodePool: structs.NodePoolDefault,
ID: fmt.Sprintf("mock-system-%s", uuid.Generate()),
Name: "my-job",
Type: structs.JobTypeSystem,
@ -469,6 +473,7 @@ func MaxParallelJob() *structs.Job {
ID: fmt.Sprintf("mock-service-%s", uuid.Generate()),
Name: "my-job",
Namespace: structs.DefaultNamespace,
NodePool: structs.NodePoolDefault,
Type: structs.JobTypeService,
Priority: structs.JobDefaultPriority,
AllAtOnce: false,

View File

@ -247,6 +247,7 @@ func Namespace() *structs.Namespace {
CreateIndex: 100,
ModifyIndex: 200,
}
ns.Canonicalize()
ns.SetHash()
return ns
}

View File

@ -891,16 +891,18 @@ func TestNodePoolEndpoint_DeleteNodePools(t *testing.T) {
for i := 0; i < 10; i++ {
pools = append(pools, mock.NodePool())
}
err := store.UpsertNodePools(structs.MsgTypeTestSetup, 100, pools)
must.NoError(t, err)
// Insert a node and job to block deleting
node := mock.Node()
node.NodePool = pools[3].Name
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 100, node))
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 101, node))
job := mock.MinJob()
job.NodePool = pools[4].Name
job.Status = structs.JobStatusRunning
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job))
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 102, nil, job))
testCases := []struct {
name string
@ -998,17 +1000,17 @@ func TestNodePoolEndpoint_DeleteNodePools_ACL(t *testing.T) {
testutil.WaitForLeader(t, s.RPC)
// Create test ACL tokens.
devToken := mock.CreatePolicyAndToken(t, store, 1001, "dev-node-pools",
devToken := mock.CreatePolicyAndToken(t, store, 100, "dev-node-pools",
mock.NodePoolPolicy("dev-*", "write", nil),
)
devSpecificToken := mock.CreatePolicyAndToken(t, store, 1003, "dev-1-node-pools",
devSpecificToken := mock.CreatePolicyAndToken(t, store, 102, "dev-1-node-pools",
mock.NodePoolPolicy("dev-1", "write", nil),
)
prodToken := mock.CreatePolicyAndToken(t, store, 1005, "prod-node-pools",
prodToken := mock.CreatePolicyAndToken(t, store, 104, "prod-node-pools",
mock.NodePoolPolicy("prod-*", "", []string{"delete"}),
)
noPolicyToken := mock.CreateToken(t, store, 1007, nil)
noDeleteToken := mock.CreatePolicyAndToken(t, store, 1009, "node-pools-no-delete",
noPolicyToken := mock.CreateToken(t, store, 106, nil)
noDeleteToken := mock.CreatePolicyAndToken(t, store, 107, "node-pools-no-delete",
mock.NodePoolPolicy("*", "", []string{"read", "write"}),
)
@ -1027,16 +1029,18 @@ func TestNodePoolEndpoint_DeleteNodePools_ACL(t *testing.T) {
qaPool.Name = fmt.Sprintf("qa-%d", i)
pools = append(pools, qaPool)
}
err := store.UpsertNodePools(structs.MsgTypeTestSetup, 108, pools)
must.NoError(t, err)
// Insert a node and job to block deleting
node := mock.Node()
node.NodePool = "prod-3"
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 100, node))
must.NoError(t, store.UpsertNode(structs.MsgTypeTestSetup, 109, node))
job := mock.MinJob()
job.NodePool = "prod-4"
job.Status = structs.JobStatusRunning
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 101, nil, job))
must.NoError(t, store.UpsertJob(structs.MsgTypeTestSetup, 110, nil, job))
testCases := []struct {
name string

View File

@ -1698,6 +1698,17 @@ func (s *StateStore) upsertJobImpl(index uint64, sub *structs.JobSubmission, job
return fmt.Errorf("job %q is in nonexistent namespace %q", job.ID, job.Namespace)
}
// Upgrade path.
// Assert the node pool is set and exists.
if job.NodePool == "" {
job.NodePool = structs.NodePoolDefault
}
if exists, err := s.nodePoolExists(txn, job.NodePool); err != nil {
return err
} else if !exists {
return fmt.Errorf("job %q is in nonexistent node pool %q", job.ID, job.NodePool)
}
// Check if the job already exists
existing, err := txn.First("jobs", "id", job.Namespace, job.ID)
var existingJob *structs.Job
@ -6687,6 +6698,8 @@ func (s *StateStore) UpsertNamespaces(index uint64, namespaces []*structs.Namesp
defer txn.Abort()
for _, ns := range namespaces {
// Handle upgrade path.
ns.Canonicalize()
if err := s.upsertNamespaceImpl(index, txn, ns); err != nil {
return err
}

View File

@ -94,6 +94,12 @@ func (s *StateStore) NodePoolsByNamePrefix(ws memdb.WatchSet, namePrefix string,
return iter, nil
}
// nodePoolExists returns true if a node pool with the given name exists.
// Any error from the underlying table lookup is returned alongside the
// result.
func (s *StateStore) nodePoolExists(txn *txn, pool string) (bool, error) {
	existing, err := txn.First(TableNodePools, "id", pool)
	return existing != nil, err
}
// UpsertNodePools inserts or updates the given set of node pools.
func (s *StateStore) UpsertNodePools(msgType structs.MessageType, index uint64, pools []*structs.NodePool) error {
txn := s.db.WriteTxnMsgT(msgType, index)

View File

@ -2396,6 +2396,54 @@ func TestStateStore_UpsertJob_BadNamespace(t *testing.T) {
assert.Nil(out)
}
// TestStateStore_UpsertJob_NodePool checks that upserting a job enforces node
// pool consistency: an empty pool is rewritten to the default pool, a valid
// pool is preserved, and a nonexistent pool is rejected.
func TestStateStore_UpsertJob_NodePool(t *testing.T) {
	ci.Parallel(t)

	// NOTE(review): assumes the fresh test state store is bootstrapped with
	// the built-in "default" node pool — confirm in testStateStore.
	state := testStateStore(t)

	testCases := []struct {
		name         string
		pool         string
		expectedPool string
		expectedErr  string
	}{
		{
			name:         "empty node pool uses default",
			pool:         "",
			expectedPool: structs.NodePoolDefault,
		},
		{
			name:         "job uses pool defined",
			pool:         structs.NodePoolDefault,
			expectedPool: structs.NodePoolDefault,
		},
		{
			name:        "error when pool doesn't exist",
			pool:        "nonexisting",
			expectedErr: "nonexistent node pool",
		},
	}

	for i, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			job := mock.Job()
			job.NodePool = tc.pool

			// Offset the raft index per case so each upsert is distinct.
			err := state.UpsertJob(structs.MsgTypeTestSetup, uint64(1000+i), nil, job)
			if tc.expectedErr != "" {
				must.ErrorContains(t, err, tc.expectedErr)
			} else {
				must.NoError(t, err)

				// Read the job back to verify the pool that was persisted.
				ws := memdb.NewWatchSet()
				got, err := state.JobByID(ws, job.Namespace, job.ID)
				must.NoError(t, err)
				must.Eq(t, tc.expectedPool, got.NodePool)
			}
		})
	}
}
// Upsert a job that is the child of a parent job and ensures its summary gets
// updated.
func TestStateStore_UpsertJob_ChildJob(t *testing.T) {

View File

@ -4420,7 +4420,12 @@ type Job struct {
// Datacenters contains all the datacenters this job is allowed to span
Datacenters []string
// NodePool specifies the node pool this job is allowed to run on
// NodePool specifies the node pool this job is allowed to run on.
//
// An empty value is allowed during job registration, in which case the
// namespace default node pool is used in Enterprise and the 'default' node
// pool in OSS. But a node pool must be set before the job is stored, so
// that will happen in the admission mutators.
NodePool string
// Constraints can be specified at a job level and apply to
@ -4573,10 +4578,6 @@ func (j *Job) Canonicalize() {
j.Datacenters = []string{"*"}
}
if j.NodePool == "" {
j.NodePool = NodePoolDefault
}
for _, tg := range j.TaskGroups {
tg.Canonicalize(j)
}
@ -4659,9 +4660,6 @@ func (j *Job) Validate() error {
}
}
}
if j.NodePool == "" {
mErr.Errors = append(mErr.Errors, errors.New("Job must be in a node_pool"))
}
if len(j.TaskGroups) == 0 {
mErr.Errors = append(mErr.Errors, errors.New("Missing job task groups"))
@ -5367,6 +5365,10 @@ type Namespace struct {
// Capabilities is the set of capabilities allowed for this namespace
Capabilities *NamespaceCapabilities
// NodePoolConfiguration is the namespace configuration for handling node
// pools.
NodePoolConfiguration *NamespaceNodePoolConfiguration
// Meta is the set of metadata key/value pairs that attached to the namespace
Meta map[string]string
@ -5386,6 +5388,28 @@ type NamespaceCapabilities struct {
DisabledTaskDrivers []string
}
// NamespaceNodePoolConfiguration stores configuration about node pools for a
// namespace.
// NamespaceNodePoolConfiguration stores configuration about node pools for a
// namespace. A nil value means the namespace has no node pool configuration:
// it is skipped when hashing and copying a Namespace.
type NamespaceNodePoolConfiguration struct {
	// Default is the node pool used by jobs in this namespace that don't
	// specify a node pool of their own.
	Default string

	// Allowed specifies the node pools that are allowed to be used by jobs in
	// this namespace. This field supports wildcard globbing through the use
	// of `*` for multi-character matching. If specified, only the node pools
	// that match these patterns are allowed. This field cannot be used
	// with Denied.
	Allowed []string

	// Denied specifies the node pools that are not allowed to be used by jobs
	// in this namespace. This field supports wildcard globbing through the use
	// of `*` for multi-character matching. If specified, any node pool is
	// allowed to be used, except for those that match any of these patterns.
	// This field cannot be used with Allowed.
	Denied []string
}
func (n *Namespace) Validate() error {
var mErr multierror.Error
@ -5399,6 +5423,16 @@ func (n *Namespace) Validate() error {
mErr.Errors = append(mErr.Errors, err)
}
err := n.NodePoolConfiguration.Validate()
switch e := err.(type) {
case *multierror.Error:
for _, npErr := range e.Errors {
mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid node pool configuration: %v", npErr))
}
case error:
mErr.Errors = append(mErr.Errors, fmt.Errorf("invalid node pool configuration: %v", e))
}
return mErr.ErrorOrNil()
}
@ -5422,6 +5456,15 @@ func (n *Namespace) SetHash() []byte {
_, _ = hash.Write([]byte(driver))
}
}
if n.NodePoolConfiguration != nil {
_, _ = hash.Write([]byte(n.NodePoolConfiguration.Default))
for _, pool := range n.NodePoolConfiguration.Allowed {
_, _ = hash.Write([]byte(pool))
}
for _, pool := range n.NodePoolConfiguration.Denied {
_, _ = hash.Write([]byte(pool))
}
}
// sort keys to ensure hash stability when meta is stored later
var keys []string
@ -5454,6 +5497,12 @@ func (n *Namespace) Copy() *Namespace {
c.DisabledTaskDrivers = slices.Clone(n.Capabilities.DisabledTaskDrivers)
nc.Capabilities = c
}
if n.NodePoolConfiguration != nil {
np := new(NamespaceNodePoolConfiguration)
*np = *n.NodePoolConfiguration
np.Allowed = slices.Clone(n.NodePoolConfiguration.Allowed)
np.Denied = slices.Clone(n.NodePoolConfiguration.Denied)
}
if n.Meta != nil {
nc.Meta = make(map[string]string, len(n.Meta))
for k, v := range n.Meta {

View File

@ -13,6 +13,17 @@ import (
multierror "github.com/hashicorp/go-multierror"
)
// Canonicalize is a no-op in the open-source build.
func (n *Namespace) Canonicalize() {}

// Canonicalize is a no-op in the open-source build.
func (n *NamespaceNodePoolConfiguration) Canonicalize() {}

// Validate fails whenever a node pool configuration is present, since Node
// Pools Governance is a Nomad Enterprise feature. A nil receiver (no
// configuration) is always valid.
func (n *NamespaceNodePoolConfiguration) Validate() error {
	if n != nil {
		return errors.New("Node Pools Governance is unlicensed.")
	}
	return nil
}
func (m *Multiregion) Validate(jobType string, jobDatacenters []string) error {
if m != nil {
return errors.New("Multiregion jobs are unlicensed.")

View File

@ -0,0 +1,46 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
//go:build !ent
// +build !ent
package structs
import (
"testing"
"github.com/hashicorp/nomad/ci"
"github.com/shoenig/test/must"
)
// TestNamespace_Validate_Oss asserts that the open-source build rejects
// namespaces carrying a node pool configuration, since Node Pools Governance
// is an Enterprise feature.
func TestNamespace_Validate_Oss(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		name        string
		namespace   *Namespace
		expectedErr string
	}{
		{
			name: "node pool config not allowed",
			namespace: &Namespace{
				Name: "test",
				NodePoolConfiguration: &NamespaceNodePoolConfiguration{
					Default: "dev",
				},
			},
			expectedErr: "unlicensed",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.namespace.Validate()
			if tc.expectedErr == "" {
				must.NoError(t, err)
				return
			}
			must.ErrorContains(t, err, tc.expectedErr)
		})
	}
}

View File

@ -23,6 +23,173 @@ import (
"github.com/stretchr/testify/require"
)
// TestNamespace_Validate exercises Namespace.Validate across invalid names,
// an overly long description, and a fully valid namespace.
//
// The manual err/strings.Contains/t.Fatalf checks are replaced with `must`
// assertions for consistency with the other tests in this file (e.g.
// TestNamespace_SetHash); the pass/fail semantics are unchanged.
func TestNamespace_Validate(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		Test      string
		Namespace *Namespace
		Expected  string
	}{
		{
			Test: "empty name",
			Namespace: &Namespace{
				Name: "",
			},
			Expected: "invalid name",
		},
		{
			Test: "slashes in name",
			Namespace: &Namespace{
				Name: "foo/bar",
			},
			Expected: "invalid name",
		},
		{
			Test: "too long name",
			Namespace: &Namespace{
				Name: strings.Repeat("a", 200),
			},
			Expected: "invalid name",
		},
		{
			Test: "too long description",
			Namespace: &Namespace{
				Name:        "foo",
				Description: strings.Repeat("a", 300),
			},
			Expected: "description longer than",
		},
		{
			Test: "valid",
			Namespace: &Namespace{
				Name:        "foo",
				Description: "bar",
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Test, func(t *testing.T) {
			err := c.Namespace.Validate()
			if c.Expected == "" {
				must.NoError(t, err)
				return
			}
			must.ErrorContains(t, err, c.Expected)
		})
	}
}
// TestNamespace_SetHash verifies that Namespace.SetHash stores and returns a
// hash, and that the hash changes whenever any hashed field changes:
// description, quota, meta, capabilities, and node pool configuration.
//
// The repeated SetHash/assert sequence is factored into a rehash helper so
// each mutation step is a single line; the assertions are identical to the
// previous inline version.
func TestNamespace_SetHash(t *testing.T) {
	ci.Parallel(t)

	ns := &Namespace{
		Name:        "foo",
		Description: "bar",
		Quota:       "q1",
		Capabilities: &NamespaceCapabilities{
			EnabledTaskDrivers:  []string{"docker"},
			DisabledTaskDrivers: []string{"raw_exec"},
		},
		NodePoolConfiguration: &NamespaceNodePoolConfiguration{
			Default: "dev",
			Allowed: []string{"default"},
		},
		Meta: map[string]string{
			"a": "b",
			"c": "d",
		},
	}

	// rehash recomputes the namespace hash, asserts it is non-nil and stored
	// on the struct, and verifies it differs from the previous hash.
	rehash := func(prev []byte) []byte {
		t.Helper()
		out := ns.SetHash()
		must.NotNil(t, out)
		must.NotNil(t, ns.Hash)
		must.Eq(t, out, ns.Hash)
		if prev != nil {
			must.NotEq(t, prev, out)
		}
		return out
	}

	out := rehash(nil)

	ns.Description = "bam"
	out = rehash(out)

	ns.Quota = "q2"
	out = rehash(out)

	ns.Meta["a"] = "c"
	delete(ns.Meta, "c")
	ns.Meta["d"] = "e"
	out = rehash(out)

	ns.Capabilities.EnabledTaskDrivers = []string{"docker", "podman"}
	ns.Capabilities.DisabledTaskDrivers = []string{}
	out = rehash(out)

	ns.NodePoolConfiguration.Default = "default"
	ns.NodePoolConfiguration.Allowed = []string{}
	ns.NodePoolConfiguration.Denied = []string{"all"}
	rehash(out)
}
// TestNamespace_Copy verifies that Namespace.Copy returns a deep copy.
//
// Strengthened: after mutating every field of the copy, the test now asserts
// the original namespace is untouched. The previous version only checked
// ns != nsCopy (trivially true after renaming the copy) and ns == ns.Copy(),
// neither of which detects a copy that aliases the original's pointer fields
// — e.g. a NodePoolConfiguration clone that is never assigned to the copy.
func TestNamespace_Copy(t *testing.T) {
	ci.Parallel(t)

	ns := &Namespace{
		Name:        "foo",
		Description: "bar",
		Quota:       "q1",
		Capabilities: &NamespaceCapabilities{
			EnabledTaskDrivers:  []string{"docker"},
			DisabledTaskDrivers: []string{"raw_exec"},
		},
		NodePoolConfiguration: &NamespaceNodePoolConfiguration{
			Default: "dev",
			Allowed: []string{"default"},
		},
		Meta: map[string]string{
			"a": "b",
			"c": "d",
		},
	}
	ns.SetHash()

	nsCopy := ns.Copy()
	nsCopy.Name = "bar"
	nsCopy.Description = "foo"
	nsCopy.Quota = "q2"
	nsCopy.Capabilities.EnabledTaskDrivers = []string{"exec"}
	nsCopy.Capabilities.DisabledTaskDrivers = []string{"java"}
	nsCopy.NodePoolConfiguration.Default = "default"
	nsCopy.NodePoolConfiguration.Allowed = []string{}
	nsCopy.NodePoolConfiguration.Denied = []string{"dev"}
	nsCopy.Meta["a"] = "z"
	must.NotEq(t, ns, nsCopy)

	// The copy must be fully detached: none of the mutations above may be
	// visible through the original.
	must.Eq(t, "foo", ns.Name)
	must.Eq(t, "bar", ns.Description)
	must.Eq(t, "q1", ns.Quota)
	must.Eq(t, []string{"docker"}, ns.Capabilities.EnabledTaskDrivers)
	must.Eq(t, []string{"raw_exec"}, ns.Capabilities.DisabledTaskDrivers)
	must.Eq(t, "dev", ns.NodePoolConfiguration.Default)
	must.Eq(t, []string{"default"}, ns.NodePoolConfiguration.Allowed)
	must.Eq(t, "b", ns.Meta["a"])

	nsCopy2 := ns.Copy()
	must.Eq(t, ns, nsCopy2)
}
func TestAuthenticatedIdentity_String(t *testing.T) {
ci.Parallel(t)

View File

@ -8,8 +8,6 @@ description: The /namespace endpoints are used to query for and interact with na
The `/namespace` endpoints are used to query for and interact with namespaces.
~> Prior to Nomad 1.0 Namespaces were an Enterprise-only feature.
## List Namespaces
This endpoint lists all namespaces.
@ -143,7 +141,8 @@ The table below shows this endpoint's support for
metadata to attach to the namespace. Namespace metadata is not used by Nomad
and is intended for use by operators and third party tools.
- `Quota` `(string: "")` - Specifies an quota to attach to the namespace.
- `Quota` `(string: "")` <EnterpriseAlert inline /> - Specifies a quota to
attach to the namespace.
- `Capabilities` `(Capabilities: <optional>)` - Specifies capabilities allowed
in the namespace. These values are checked at job submission.
@ -154,9 +153,28 @@ The table below shows this endpoint's support for
- `DisabledTaskDrivers` `(array<string>: [])` - List of task drivers disabled
in the namespace.
- `NodePoolConfiguration` `(NodePoolConfiguration: <optional>)` <EnterpriseAlert inline /> -
Specifies node pool configurations. These values are checked at job
submission.
- `Default` `(string: "default")` - Specifies the node pool to use for jobs
in this namespace that don't define a node pool in their specification.
- `Allowed` `(array<string>: [])` - Specifies the node pools that are allowed
to be used by jobs in this namespace. This field supports wildcard globbing
through the use of `*` for multi-character matching. If specified, only the
node pools that match these patterns are allowed. This field cannot be used
with `Denied`.
- `Denied` `(array<string>: [])` - Specifies the node pools that are not
allowed to be used by jobs in this namespace. This field supports wildcard
globbing through the use of `*` for multi-character matching. If specified,
any node pool is allowed except for those that match any of these patterns.
This field cannot be used with `Allowed`.
### Sample Payload
```javascript
```json
{
"Name": "api-prod",
"Description": "Production API Servers",
@ -166,12 +184,14 @@ The table below shows this endpoint's support for
"Quota": "prod-quota",
"Capabilities": {
"DisabledTaskDrivers": ["raw_exec"]
},
"NodePoolConfiguration": {
"Default": "prod-pool",
"Allowed": ["default"]
}
}
```
Note that the `Quota` key is Enterprise-only.
### Sample Request
```shell-session

View File

@ -9,8 +9,11 @@ description: |
The `namespace apply` command is used to create or update a namespace.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -9,8 +9,11 @@ description: |
The `namespace delete` command is used to delete a namespace.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -9,8 +9,11 @@ description: |
The `namespace` command is used to interact with namespaces.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -11,8 +11,11 @@ description: >
The `namespace inspect` command is used to view raw information about a particular
namespace.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -9,8 +9,11 @@ description: |
The `namespace list` command is used to list available namespaces.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -11,8 +11,11 @@ description: >
The `namespace status` command is used to view the status of a particular
namespace.
~> Namespaces are open source in Nomad 1.0. Namespaces were Enterprise-only
when introduced in Nomad 0.7.
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
## Usage

View File

@ -0,0 +1,96 @@
---
layout: docs
page_title: Namespace Specification
description: Learn about Nomad's Namespace specification.
---
# Namespace Specification
<Tip>
Visit the <a href="https://developer.hashicorp.com/nomad/tutorials/manage-clusters/namespaces">
Nomad Namespaces tutorial
</a> for more information.
</Tip>
Nomad Namespaces may be specified as HCL files and submitted by the [`nomad
namespace apply`][cli_ns_apply] CLI command.
Unlike [Job specifications][jobspecs], Namespace specifications do *not*
support [HCL2][hcl2] features like functions.
Example namespace specification:
```hcl
name = "prod-eng"
description = "Namespace for production workloads."
# Quotas are a Nomad Enterprise feature.
quota = "eng"
meta {
owner = "eng"
}
capabilities {
enabled_task_drivers = ["java", "docker"]
disabled_task_drivers = ["raw_exec"]
}
# Node Pool configuration is a Nomad Enterprise feature.
node_pool_config {
default = "prod"
allowed = ["all", "default"]
}
```
## Namespace Specification Parameters
- `name` `(string: <required>)` - Specifies the namespace to create or update.
- `description` `(string: "")` - Specifies an optional human-readable
description of the namespace.
- `quota` `(string: "")` <EnterpriseAlert inline /> - Specifies a quota to
attach to the namespace.
- `meta` `(object: null)` - Optional object with string keys and values of
metadata to attach to the namespace. Namespace metadata is not used by Nomad
and is intended for use by operators and third party tools.
- `capabilities` <code>([Capabilities](#capabilities-parameters): &lt;optional&gt;)</code> -
Specifies capabilities allowed in the namespace. These values are checked at
job submission.
- `node_pool_config` <code>([NodePoolConfiguration](#node_pool_config-parameters): &lt;optional&gt;)</code> <EnterpriseAlert inline /> -
Specifies node pool configurations. These values are checked at job
submission.
### `capabilities` Parameters
- `enabled_task_drivers` `(array<string>: [])` - List of task drivers allowed
in the namespace. If empty all task drivers are allowed.
- `disabled_task_drivers` `(array<string>: [])` - List of task drivers disabled
in the namespace.
### `node_pool_config` Parameters <EnterpriseAlert inline />
- `default` `(string: "default")` - Specifies the node pool to use for jobs in
this namespace that don't define a node pool in their specification.
- `allowed` `(array<string>: [])` - Specifies the node pools that are allowed
to be used by jobs in this namespace. This field supports wildcard globbing
through the use of `*` for multi-character matching. If specified, only the
node pools that match these patterns are allowed. This field cannot be used
with `denied`.
- `denied` `(array<string>: [])` - Specifies the node pools that are not
allowed to be used by jobs in this namespace. This field supports wildcard
globbing through the use of `*` for multi-character matching. If specified,
any node pool is allowed to be used, except for those that match any of these
patterns. This field cannot be used with `allowed`.
[cli_ns_apply]: /nomad/docs/commands/namespace/apply
[hcl2]: /nomad/docs/job-specification/hcl2
[jobspecs]: /nomad/docs/job-specification

View File

@ -1744,6 +1744,10 @@
"title": "ACL Policy",
"path": "other-specifications/acl-policy"
},
{
"title": "Namespace",
"path": "other-specifications/namespace"
},
{
"title": "Node Pool",
"path": "other-specifications/node-pool"