Alex Dadgar 2017-10-13 14:36:02 -07:00
parent 44927ca449
commit c1cc51dbee
104 changed files with 3716 additions and 433 deletions

View File

@ -47,6 +47,7 @@ type ACL struct {
agent string
node string
operator string
quota string
}
// maxPrivilege returns the policy which grants the most privilege
@ -115,6 +116,9 @@ func NewACL(management bool, policies []*Policy) (*ACL, error) {
if policy.Operator != nil {
acl.operator = maxPrivilege(acl.operator, policy.Operator.Policy)
}
if policy.Quota != nil {
acl.quota = maxPrivilege(acl.quota, policy.Quota.Policy)
}
}
// Finalize the namespaces
@ -145,6 +149,28 @@ func (a *ACL) AllowNamespaceOperation(ns string, op string) bool {
return capabilities.Check(op)
}
// AllowNamespace checks if any operations are allowed for a namespace
func (a *ACL) AllowNamespace(ns string) bool {
// Hot path management tokens
if a.management {
return true
}
// Check for a matching capability set
raw, ok := a.namespaces.Get([]byte(ns))
if !ok {
return false
}
// Check if the capability has been granted
capabilities := raw.(capabilitySet)
if len(capabilities) == 0 {
return false
}
return !capabilities.Check(PolicyDeny)
}
// AllowAgentRead checks if read operations are allowed for an agent
func (a *ACL) AllowAgentRead() bool {
switch {
@ -223,6 +249,32 @@ func (a *ACL) AllowOperatorWrite() bool {
}
}
// AllowQuotaRead checks if read operations are allowed for all quotas
func (a *ACL) AllowQuotaRead() bool {
switch {
case a.management:
return true
case a.quota == PolicyWrite:
return true
case a.quota == PolicyRead:
return true
default:
return false
}
}
// AllowQuotaWrite checks if write operations are allowed for quotas
func (a *ACL) AllowQuotaWrite() bool {
switch {
case a.management:
return true
case a.quota == PolicyWrite:
return true
default:
return false
}
}
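As a side note, a minimal sketch (not part of this commit) of how the new quota policy flows end to end, assuming the acl package import path used throughout Nomad:

package main

import (
	"fmt"

	"github.com/hashicorp/nomad/acl"
)

func main() {
	// A read-level quota policy; NewACL merges policies with maxPrivilege,
	// so the strongest grant across all attached policies wins.
	policy, err := acl.Parse(`quota { policy = "read" }`)
	if err != nil {
		panic(err)
	}

	a, err := acl.NewACL(false, []*acl.Policy{policy})
	if err != nil {
		panic(err)
	}

	fmt.Println(a.AllowQuotaRead())  // true: "read" grants read
	fmt.Println(a.AllowQuotaWrite()) // false: "read" does not grant write
}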
// IsManagement checks if this represents a management token
func (a *ACL) IsManagement() bool {
return a.management

View File

@ -60,95 +60,111 @@ func TestMaxPrivilege(t *testing.T) {
}
func TestACLManagement(t *testing.T) {
assert := assert.New(t)
// Create management ACL
acl, err := NewACL(true, nil)
assert.Nil(t, err)
assert.Nil(err)
// Check default namespace rights
assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.True(acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.True(acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.True(acl.AllowNamespace("default"))
// Check non-specified namespace
assert.Equal(t, true, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.True(acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.True(acl.AllowNamespace("foo"))
// Check the other simpler operations
assert.Equal(t, true, acl.IsManagement())
assert.Equal(t, true, acl.AllowAgentRead())
assert.Equal(t, true, acl.AllowAgentWrite())
assert.Equal(t, true, acl.AllowNodeRead())
assert.Equal(t, true, acl.AllowNodeWrite())
assert.Equal(t, true, acl.AllowOperatorRead())
assert.Equal(t, true, acl.AllowOperatorWrite())
assert.True(acl.IsManagement())
assert.True(acl.AllowAgentRead())
assert.True(acl.AllowAgentWrite())
assert.True(acl.AllowNodeRead())
assert.True(acl.AllowNodeWrite())
assert.True(acl.AllowOperatorRead())
assert.True(acl.AllowOperatorWrite())
assert.True(acl.AllowQuotaRead())
assert.True(acl.AllowQuotaWrite())
}
func TestACLMerge(t *testing.T) {
assert := assert.New(t)
// Merge read + write policy
p1, err := Parse(readAll)
assert.Nil(t, err)
assert.Nil(err)
p2, err := Parse(writeAll)
assert.Nil(t, err)
assert.Nil(err)
acl, err := NewACL(false, []*Policy{p1, p2})
assert.Nil(t, err)
assert.Nil(err)
// Check default namespace rights
assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.True(acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.True(acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.True(acl.AllowNamespace("default"))
// Check non-specified namespace
assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespace("foo"))
// Check the other simpler operations
assert.Equal(t, false, acl.IsManagement())
assert.Equal(t, true, acl.AllowAgentRead())
assert.Equal(t, true, acl.AllowAgentWrite())
assert.Equal(t, true, acl.AllowNodeRead())
assert.Equal(t, true, acl.AllowNodeWrite())
assert.Equal(t, true, acl.AllowOperatorRead())
assert.Equal(t, true, acl.AllowOperatorWrite())
assert.False(acl.IsManagement())
assert.True(acl.AllowAgentRead())
assert.True(acl.AllowAgentWrite())
assert.True(acl.AllowNodeRead())
assert.True(acl.AllowNodeWrite())
assert.True(acl.AllowOperatorRead())
assert.True(acl.AllowOperatorWrite())
assert.True(acl.AllowQuotaRead())
assert.True(acl.AllowQuotaWrite())
// Merge read + blank
p3, err := Parse("")
assert.Nil(t, err)
assert.Nil(err)
acl, err = NewACL(false, []*Policy{p1, p3})
assert.Nil(t, err)
assert.Nil(err)
// Check default namespace rights
assert.Equal(t, true, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.True(acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
// Check non-specified namespace
assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
// Check the other simpler operations
assert.Equal(t, false, acl.IsManagement())
assert.Equal(t, true, acl.AllowAgentRead())
assert.Equal(t, false, acl.AllowAgentWrite())
assert.Equal(t, true, acl.AllowNodeRead())
assert.Equal(t, false, acl.AllowNodeWrite())
assert.Equal(t, true, acl.AllowOperatorRead())
assert.Equal(t, false, acl.AllowOperatorWrite())
assert.False(acl.IsManagement())
assert.True(acl.AllowAgentRead())
assert.False(acl.AllowAgentWrite())
assert.True(acl.AllowNodeRead())
assert.False(acl.AllowNodeWrite())
assert.True(acl.AllowOperatorRead())
assert.False(acl.AllowOperatorWrite())
assert.True(acl.AllowQuotaRead())
assert.False(acl.AllowQuotaWrite())
// Merge read + deny
p4, err := Parse(denyAll)
assert.Nil(t, err)
assert.Nil(err)
acl, err = NewACL(false, []*Policy{p1, p4})
assert.Nil(t, err)
assert.Nil(err)
// Check default namespace rights
assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.Equal(t, false, acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
assert.False(acl.AllowNamespaceOperation("default", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespaceOperation("default", NamespaceCapabilitySubmitJob))
// Check non-specified namespace
assert.Equal(t, false, acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
assert.False(acl.AllowNamespaceOperation("foo", NamespaceCapabilityListJobs))
// Check the other simpler operations
assert.Equal(t, false, acl.IsManagement())
assert.Equal(t, false, acl.AllowAgentRead())
assert.Equal(t, false, acl.AllowAgentWrite())
assert.Equal(t, false, acl.AllowNodeRead())
assert.Equal(t, false, acl.AllowNodeWrite())
assert.Equal(t, false, acl.AllowOperatorRead())
assert.Equal(t, false, acl.AllowOperatorWrite())
assert.False(acl.IsManagement())
assert.False(acl.AllowAgentRead())
assert.False(acl.AllowAgentWrite())
assert.False(acl.AllowNodeRead())
assert.False(acl.AllowNodeWrite())
assert.False(acl.AllowOperatorRead())
assert.False(acl.AllowOperatorWrite())
assert.False(acl.AllowQuotaRead())
assert.False(acl.AllowQuotaWrite())
}
var readAll = `
@ -164,6 +180,9 @@ node {
operator {
policy = "read"
}
quota {
policy = "read"
}
`
var writeAll = `
@ -179,6 +198,9 @@ node {
operator {
policy = "write"
}
quota {
policy = "write"
}
`
var denyAll = `
@ -194,4 +216,49 @@ node {
operator {
policy = "deny"
}
quota {
policy = "deny"
}
`
func TestAllowNamespace(t *testing.T) {
tests := []struct {
Policy string
Allow bool
}{
{
Policy: `namespace "foo" {}`,
Allow: false,
},
{
Policy: `namespace "foo" { policy = "deny" }`,
Allow: false,
},
{
Policy: `namespace "foo" { capabilities = ["deny"] }`,
Allow: false,
},
{
Policy: `namespace "foo" { capabilities = ["list-jobs"] }`,
Allow: true,
},
{
Policy: `namespace "foo" { policy = "read" }`,
Allow: true,
},
}
for _, tc := range tests {
t.Run(tc.Policy, func(t *testing.T) {
assert := assert.New(t)
policy, err := Parse(tc.Policy)
assert.Nil(err)
acl, err := NewACL(false, []*Policy{policy})
assert.Nil(err)
assert.Equal(tc.Allow, acl.AllowNamespace("foo"))
})
}
}

View File

@ -41,6 +41,7 @@ type Policy struct {
Agent *AgentPolicy `hcl:"agent"`
Node *NodePolicy `hcl:"node"`
Operator *OperatorPolicy `hcl:"operator"`
Quota *QuotaPolicy `hcl:"quota"`
Raw string `hcl:"-"`
}
@ -63,6 +64,10 @@ type OperatorPolicy struct {
Policy string
}
type QuotaPolicy struct {
Policy string
}
// isPolicyValid makes sure the given string matches one of the valid policies.
func isPolicyValid(policy string) bool {
switch policy {
@ -162,5 +167,9 @@ func Parse(rules string) (*Policy, error) {
if p.Operator != nil && !isPolicyValid(p.Operator.Policy) {
return nil, fmt.Errorf("Invalid operator policy: %#v", p.Operator)
}
if p.Quota != nil && !isPolicyValid(p.Quota.Policy) {
return nil, fmt.Errorf("Invalid quota policy: %#v", p.Quota)
}
return p, nil
}

View File

@ -55,6 +55,9 @@ func TestParse(t *testing.T) {
operator {
policy = "deny"
}
quota {
policy = "read"
}
`,
"",
&Policy{
@ -96,6 +99,9 @@ func TestParse(t *testing.T) {
Operator: &OperatorPolicy{
Policy: PolicyDeny,
},
Quota: &QuotaPolicy{
Policy: PolicyRead,
},
},
},
{
@ -143,6 +149,15 @@ func TestParse(t *testing.T) {
"Invalid operator policy",
nil,
},
{
`
quota {
policy = "foo"
}
`,
"Invalid quota policy",
nil,
},
{
`
namespace "has a space"{

View File

@ -107,6 +107,7 @@ type AllocationMetric struct {
NodesExhausted int
ClassExhausted map[string]int
DimensionExhausted map[string]int
QuotaExhausted []string
Scores map[string]float64
AllocationTime time.Duration
CoalescedFailures int

View File

@ -10,5 +10,6 @@ const (
Jobs Context = "jobs"
Nodes Context = "nodes"
Namespaces Context = "namespaces"
Quotas Context = "quotas"
All Context = "all"
)

View File

@ -73,6 +73,7 @@ type Evaluation struct {
FailedTGAllocs map[string]*AllocationMetric
ClassEligibility map[string]bool
EscapedComputedClass bool
QuotaLimitReached string
AnnotatePlan bool
QueuedAllocations map[string]int
SnapshotIndex uint64

View File

@ -69,6 +69,7 @@ func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, erro
type Namespace struct {
Name string
Description string
Quota string
CreateIndex uint64
ModifyIndex uint64
}

api/quota.go (new file, 186 lines)
View File

@ -0,0 +1,186 @@
package api
import (
"fmt"
"sort"
)
// Quotas is used to query the quotas endpoints.
type Quotas struct {
client *Client
}
// Quotas returns a new handle on the quotas.
func (c *Client) Quotas() *Quotas {
return &Quotas{client: c}
}
// List is used to dump all of the quota specs
func (q *Quotas) List(qo *QueryOptions) ([]*QuotaSpec, *QueryMeta, error) {
var resp []*QuotaSpec
qm, err := q.client.query("/v1/quotas", &resp, qo)
if err != nil {
return nil, nil, err
}
sort.Sort(QuotaSpecIndexSort(resp))
return resp, qm, nil
}
// PrefixList is used to do a PrefixList search over quota specs
func (q *Quotas) PrefixList(prefix string, qo *QueryOptions) ([]*QuotaSpec, *QueryMeta, error) {
if qo == nil {
qo = &QueryOptions{Prefix: prefix}
} else {
qo.Prefix = prefix
}
return q.List(qo)
}
// ListUsage is used to dump all of the quota usages
func (q *Quotas) ListUsage(qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
var resp []*QuotaUsage
qm, err := q.client.query("/v1/quota-usages", &resp, qo)
if err != nil {
return nil, nil, err
}
sort.Sort(QuotaUsageIndexSort(resp))
return resp, qm, nil
}
// PrefixListUsage is used to do a PrefixList search over quota usages
func (q *Quotas) PrefixListUsage(prefix string, qo *QueryOptions) ([]*QuotaUsage, *QueryMeta, error) {
if qo == nil {
qo = &QueryOptions{Prefix: prefix}
} else {
qo.Prefix = prefix
}
return q.ListUsage(qo)
}
// Info is used to query a single quota spec by its name.
func (q *Quotas) Info(name string, qo *QueryOptions) (*QuotaSpec, *QueryMeta, error) {
var resp QuotaSpec
qm, err := q.client.query("/v1/quota/"+name, &resp, qo)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// Usage is used to query a single quota usage by its name.
func (q *Quotas) Usage(name string, qo *QueryOptions) (*QuotaUsage, *QueryMeta, error) {
var resp QuotaUsage
qm, err := q.client.query("/v1/quota/usage/"+name, &resp, qo)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// Register is used to register a quota spec.
func (q *Quotas) Register(spec *QuotaSpec, qo *WriteOptions) (*WriteMeta, error) {
wm, err := q.client.write("/v1/quota", spec, nil, qo)
if err != nil {
return nil, err
}
return wm, nil
}
// Delete is used to delete a quota spec
func (q *Quotas) Delete(quota string, qo *WriteOptions) (*WriteMeta, error) {
wm, err := q.client.delete(fmt.Sprintf("/v1/quota/%s", quota), nil, qo)
if err != nil {
return nil, err
}
return wm, nil
}
// QuotaSpec specifies the allowed resource usage across regions.
type QuotaSpec struct {
// Name is the name for the quota object
Name string
// Description is an optional description for the quota object
Description string
// Limits is the set of quota limits encapsulated by this quota object. Each
// limit applies quota in a particular region and in the future over a
// particular priority range and datacenter set.
Limits []*QuotaLimit
// Raft indexes to track creation and modification
CreateIndex uint64
ModifyIndex uint64
}
// QuotaLimit describes the resource limit in a particular region.
type QuotaLimit struct {
// Region is the region in which this limit takes effect
Region string
// RegionLimit is the quota limit that applies to any allocation within a
// referencing namespace in the region. A value of zero is treated as
// unlimited and a negative value is treated as fully disallowed. This will
// be useful once we support GPUs
RegionLimit *Resources
// Hash is the hash of the object and is used to make replication efficient.
Hash []byte
}
// QuotaUsage is the resource usage of a Quota
type QuotaUsage struct {
Name string
Used map[string]*QuotaLimit
CreateIndex uint64
ModifyIndex uint64
}
// QuotaSpecIndexSort is a wrapper to sort QuotaSpecs by CreateIndex. We
// reverse the test so that we get the highest index first.
type QuotaSpecIndexSort []*QuotaSpec
func (q QuotaSpecIndexSort) Len() int {
return len(q)
}
func (q QuotaSpecIndexSort) Less(i, j int) bool {
return q[i].CreateIndex > q[j].CreateIndex
}
func (q QuotaSpecIndexSort) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
}
// QuotaUsageIndexSort is a wrapper to sort QuotaUsages by CreateIndex. We
// reverse the test so that we get the highest index first.
type QuotaUsageIndexSort []*QuotaUsage
func (q QuotaUsageIndexSort) Len() int {
return len(q)
}
func (q QuotaUsageIndexSort) Less(i, j int) bool {
return q[i].CreateIndex > q[j].CreateIndex
}
func (q QuotaUsageIndexSort) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
}
// QuotaLimitSort is a wrapper to sort QuotaLimits
type QuotaLimitSort []*QuotaLimit
func (q QuotaLimitSort) Len() int {
return len(q)
}
func (q QuotaLimitSort) Less(i, j int) bool {
return q[i].Region < q[j].Region
}
func (q QuotaLimitSort) Swap(i, j int) {
q[i], q[j] = q[j], q[i]
}
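To illustrate the client surface added in this file, a hedged usage sketch; the quota name and limit values are made up, and error handling is reduced to log.Fatal:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Register a spec limiting the global region (hypothetical values).
	spec := &api.QuotaSpec{
		Name:        "dev-quota",
		Description: "limits for the dev namespaces",
		Limits: []*api.QuotaLimit{
			{
				Region: "global",
				RegionLimit: &api.Resources{
					CPU:      helper.IntToPtr(1000),
					MemoryMB: helper.IntToPtr(1024),
				},
			},
		},
	}
	if _, err := client.Quotas().Register(spec, nil); err != nil {
		log.Fatal(err)
	}

	// Read back the usage tracked against the spec.
	usage, _, err := client.Quotas().Usage("dev-quota", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(usage.Name, len(usage.Used))
}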

api/quota_test.go (new file, 208 lines)
View File

@ -0,0 +1,208 @@
// +build ent
package api
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestQuotas_Register(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Create a quota spec and register it
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the specs back out again
resp, qm, err := quotas.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs.Name, resp[0].Name)
}
func TestQuotas_Register_Invalid(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Create an invalid quota spec and register it
qs := testQuotaSpec()
qs.Name = "*"
_, err := quotas.Register(qs, nil)
assert.NotNil(err)
}
func TestQuotas_Info(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Trying to retrieve a quota spec before it exists returns an error
_, _, err := quotas.Info("foo", nil)
assert.NotNil(err)
assert.Contains(err.Error(), "not found")
// Register the quota
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quota again and ensure it exists
result, qm, err := quotas.Info(qs.Name, nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.NotNil(result)
assert.Equal(qs.Name, result.Name)
}
func TestQuotas_Usage(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Trying to retrieve a quota usage before it exists returns an error
_, _, err := quotas.Usage("foo", nil)
assert.NotNil(err)
assert.Contains(err.Error(), "not found")
// Register the quota
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quota usage and ensure it exists
result, qm, err := quotas.Usage(qs.Name, nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.NotNil(result)
assert.Equal(qs.Name, result.Name)
}
func TestQuotas_Delete(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Create a quota and register it
qs := testQuotaSpec()
wm, err := quotas.Register(qs, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quota back out again
resp, qm, err := quotas.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs.Name, resp[0].Name)
// Delete the quota
wm, err = quotas.Delete(qs.Name, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quotas back out again
resp, qm, err = quotas.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 0)
}
func TestQuotas_List(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Create two quotas and register them
qs1 := testQuotaSpec()
qs2 := testQuotaSpec()
qs1.Name = "fooaaa"
qs2.Name = "foobbb"
wm, err := quotas.Register(qs1, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
wm, err = quotas.Register(qs2, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quotas
resp, qm, err := quotas.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixList("foo", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixList("foob", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs2.Name, resp[0].Name)
}
func TestQuotas_ListUsages(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
quotas := c.Quotas()
// Create two quotas and register them
qs1 := testQuotaSpec()
qs2 := testQuotaSpec()
qs1.Name = "fooaaa"
qs2.Name = "foobbb"
wm, err := quotas.Register(qs1, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
wm, err = quotas.Register(qs2, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the quotas
resp, qm, err := quotas.ListUsage(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixListUsage("foo", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
// Query the quotas using a prefix
resp, qm, err = quotas.PrefixListUsage("foob", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(qs2.Name, resp[0].Name)
}

View File

@ -62,3 +62,19 @@ func testNamespace() *Namespace {
Description: "Testing namespaces",
}
}
func testQuotaSpec() *QuotaSpec {
return &QuotaSpec{
Name: "test-namespace",
Description: "Testing namespaces",
Limits: []*QuotaLimit{
{
Region: "global",
RegionLimit: &Resources{
CPU: helper.IntToPtr(2000),
MemoryMB: helper.IntToPtr(2000),
},
},
},
}
}

View File

@ -16,7 +16,7 @@ func (c *ACLBootstrapCommand) Help() string {
helpText := `
Usage: nomad acl bootstrap [options]
Bootstrap is used to bootstrap the ACL system and get an initial token.
General Options:

View File

@ -18,8 +18,8 @@ func (c *ACLPolicyApplyCommand) Help() string {
helpText := `
Usage: nomad acl policy apply [options] <name> <path>
Apply is used to create or update an ACL policy. The policy is
sourced from <path> or from stdin if path is "-".
General Options:

View File

@ -15,7 +15,7 @@ func (c *ACLPolicyDeleteCommand) Help() string {
helpText := `
Usage: nomad acl policy delete <name>
Delete is used to delete an existing ACL policy.
General Options:

View File

@ -15,7 +15,7 @@ func (c *ACLPolicyInfoCommand) Help() string {
helpText := `
Usage: nomad acl policy info <name>
Info is used to fetch information on an existing ACL policy.
General Options:

View File

@ -16,7 +16,7 @@ func (c *ACLTokenCreateCommand) Help() string {
helpText := `
Usage: nomad acl token create [options]
Create is used to issue new ACL tokens. Requires a management token.
General Options:

View File

@ -15,7 +15,7 @@ func (c *ACLTokenDeleteCommand) Help() string {
helpText := `
Usage: nomad acl token delete <token_accessor_id>
Delete is used to delete an existing ACL token. Requires a management token.
General Options:

View File

@ -15,7 +15,7 @@ func (c *ACLTokenInfoCommand) Help() string {
helpText := `
Usage: nomad acl token info <token_accessor_id>
Info is used to fetch information on an existing ACL token. Requires a management token.
General Options:

View File

@ -15,7 +15,7 @@ func (c *ACLTokenUpdateCommand) Help() string {
helpText := `
Usage: nomad acl token update <token_accessor_id>
Update is used to update an existing ACL token. Requires a management token.
General Options:

View File

@ -11,6 +11,7 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/nomad/helper"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/mitchellh/mapstructure"
)
@ -99,7 +100,7 @@ func parseConfig(result *Config, list *ast.ObjectList) error {
"acl",
"sentinel",
}
if err := checkHCLKeys(list, valid); err != nil {
if err := helper.CheckHCLKeys(list, valid); err != nil {
return multierror.Prefix(err, "config:")
}
@ -244,7 +245,7 @@ func parsePorts(result **Ports, list *ast.ObjectList) error {
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -276,7 +277,7 @@ func parseAddresses(result **Addresses, list *ast.ObjectList) error {
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -308,7 +309,7 @@ func parseAdvertise(result **AdvertiseAddrs, list *ast.ObjectList) error {
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -367,7 +368,7 @@ func parseClient(result **ClientConfig, list *ast.ObjectList) error {
"gc_max_allocs",
"no_host_uuid",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -473,7 +474,7 @@ func parseReserved(result **Resources, list *ast.ObjectList) error {
"iops",
"reserved_ports",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -534,7 +535,7 @@ func parseServer(result **ServerConfig, list *ast.ObjectList) error {
"encrypt",
"authoritative_region",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -584,7 +585,7 @@ func parseACL(result **ACLConfig, list *ast.ObjectList) error {
"policy_ttl",
"replication_token",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -646,7 +647,7 @@ func parseTelemetry(result **Telemetry, list *ast.ObjectList) error {
"disable_tagged_metrics",
"backwards_compatible_metrics",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -686,7 +687,7 @@ func parseAtlas(result **AtlasConfig, list *ast.ObjectList) error {
"join",
"endpoint",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -731,7 +732,7 @@ func parseConsulConfig(result **config.ConsulConfig, list *ast.ObjectList) error
"verify_ssl",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -776,7 +777,7 @@ func parseTLSConfig(result **config.TLSConfig, list *ast.ObjectList) error {
"verify_https_client",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -818,7 +819,7 @@ func parseVaultConfig(result **config.VaultConfig, list *ast.ObjectList) error {
"token",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -865,7 +866,7 @@ func parseSentinel(result **config.SentinelConfig, list *ast.ObjectList) error {
valid := []string{
"import",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return err
}
@ -877,31 +878,3 @@ func parseSentinel(result **config.SentinelConfig, list *ast.ObjectList) error {
*result = &config
return nil
}
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf(
"invalid key: %s", key))
}
}
return result
}

View File

@ -9,8 +9,14 @@ func (s *HTTPServer) registerEnterpriseHandlers() {
s.mux.HandleFunc("/v1/namespaces", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/namespace", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/namespace/", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/sentinel/policies", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/sentinel/policy/", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/quotas", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/quota-usages", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/quota/", s.wrap(s.entOnly))
s.mux.HandleFunc("/v1/quota", s.wrap(s.entOnly))
}
func (s *HTTPServer) entOnly(resp http.ResponseWriter, req *http.Request) (interface{}, error) {

View File

@ -16,10 +16,10 @@ func (c *DeploymentFailCommand) Help() string {
helpText := `
Usage: nomad deployment fail [options] <deployment id>
Fail is used to mark a deployment as failed. Failing a deployment will
stop the placement of new allocations as part of a rolling deployment and,
if the job is configured to auto revert, the job will attempt to roll back to a
stable version.
General Options:

View File

@ -16,7 +16,7 @@ func (c *DeploymentListCommand) Help() string {
helpText := `
Usage: nomad deployment list [options]
List is used to list the set of deployments tracked by Nomad.
General Options:

View File

@ -16,8 +16,8 @@ func (c *DeploymentPauseCommand) Help() string {
helpText := `
Usage: nomad deployment pause [options] <deployment id>
Pause is used to pause a deployment. Pausing a deployment will pause the
placement of new allocations as part of a rolling deployment.
General Options:

View File

@ -18,13 +18,13 @@ func (c *DeploymentPromoteCommand) Help() string {
helpText := `
Usage: nomad deployment promote [options] <deployment id>
Promote is used to promote task groups in a deployment. Promotion should occur
when the deployment has placed canaries for a task group and those canaries have
been deemed healthy. When a task group is promoted, the rolling upgrade of the
remaining allocations is unblocked. If the canaries are found to be unhealthy, the
deployment may either be failed using the "nomad deployment fail" command, the
job can be failed forward by submitting a new version, or failed backwards by
reverting to an older version using the "nomad job revert" command.
General Options:

View File

@ -16,8 +16,8 @@ func (c *DeploymentResumeCommand) Help() string {
helpText := `
Usage: nomad deployment resume [options] <deployment id>
Resume is used to unpause a paused deployment. Resuming a deployment will
resume the placement of new allocations as part of a rolling deployment.
General Options:

View File

@ -18,8 +18,8 @@ func (c *DeploymentStatusCommand) Help() string {
helpText := `
Usage: nomad deployment status [options] <deployment id>
Status is used to display the status of a deployment. The status will display
the number of desired changes as well as the currently applied changes.
General Options:

View File

@ -16,7 +16,7 @@ func (c *JobDeploymentsCommand) Help() string {
helpText := `
Usage: nomad job deployments [options] <job>
Deployments is used to display the deployments for a particular job.
General Options:

View File

@ -19,14 +19,14 @@ func (c *JobDispatchCommand) Help() string {
helpText := `
Usage: nomad job dispatch [options] <parameterized job> [input source]
Dispatch creates an instance of a parameterized job. A data payload to the
dispatched instance can be provided via stdin by using "-" or by specifying a
path to a file. Metadata can be supplied by using the meta flag one or more
times.
Upon successful creation, the dispatched job ID will be printed and the
triggered evaluation will be monitored. This can be disabled by supplying the
detach flag.
General Options:

View File

@ -21,10 +21,10 @@ func (c *JobHistoryCommand) Help() string {
helpText := `
Usage: nomad job history [options] <job>
History is used to display the known versions of a particular job. The command
can display the diff between job versions and can be useful for understanding
the changes that occurred to the job as well as deciding which job versions to
revert to.
General Options:

View File

@ -18,14 +18,14 @@ func (c *JobPromoteCommand) Help() string {
helpText := `
Usage: nomad job promote [options] <job id>
Promote is used to promote task groups in the most recent deployment for the
given job. Promotion should occur when the deployment has placed canaries for a
task group and those canaries have been deemed healthy. When a task group is
promoted, the rolling upgrade of the remaining allocations is unblocked. If the
canaries are found to be unhealthy, the deployment may either be failed using
the "nomad deployment fail" command, the job can be failed forward by submitting
a new version, or failed backwards by reverting to an older version using the
"nomad job revert" command.
General Options:

View File

@ -16,8 +16,8 @@ func (c *JobRevertCommand) Help() string {
helpText := `
Usage: nomad job revert [options] <job> <version>
Revert is used to revert a job to a prior version of the job. The available
versions to revert to can be found using the "nomad job history" command.
General Options:

View File

@ -13,6 +13,21 @@ type KeygenCommand struct {
Meta
}
func (c *KeygenCommand) Synopsis() string {
return "Generates a new encryption key"
}
func (c *KeygenCommand) Help() string {
helpText := `
Usage: nomad keygen
Generates a new encryption key that can be used to configure the
agent to encrypt traffic. The output of this command is already
in the proper format that the agent expects.
`
return strings.TrimSpace(helpText)
}
func (c *KeygenCommand) Run(_ []string) int {
key := make([]byte, 16)
n, err := rand.Reader.Read(key)
@ -28,18 +43,3 @@ func (c *KeygenCommand) Run(_ []string) int {
c.Ui.Output(base64.StdEncoding.EncodeToString(key))
return 0
}

View File

@ -15,6 +15,58 @@ type KeyringCommand struct {
Meta
}
func (c *KeyringCommand) Help() string {
helpText := `
Usage: nomad keyring [options]
Manages encryption keys used for gossip messages between Nomad servers. Gossip
encryption is optional. When enabled, this command may be used to examine
active encryption keys in the cluster, add new keys, and remove old ones. When
combined, this functionality provides the ability to perform key rotation
cluster-wide, without disrupting the cluster.
All operations performed by this command can only be run against server nodes.
All variations of the keyring command return 0 if all nodes reply and there
are no errors. If any node fails to reply or reports failure, the exit code
will be 1.
General Options:
` + generalOptionsUsage() + `
Keyring Options:
-install=<key> Install a new encryption key. This will broadcast
the new key to all members in the cluster.
-list List all keys currently in use within the cluster.
-remove=<key> Remove the given key from the cluster. This
operation may only be performed on keys which are
not currently the primary key.
-use=<key> Change the primary encryption key, which is used to
encrypt messages. The key must already be installed
before this operation can succeed.
`
return strings.TrimSpace(helpText)
}
func (c *KeyringCommand) Synopsis() string {
return "Manages gossip layer encryption keys"
}
func (c *KeyringCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-install": complete.PredictAnything,
"-list": complete.PredictNothing,
"-remove": complete.PredictAnything,
"-use": complete.PredictAnything,
})
}
func (c *KeyringCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
}
func (c *KeyringCommand) Run(args []string) int {
var installKey, useKey, removeKey, token string
var listKeys bool
@ -117,55 +169,3 @@ func (c *KeyringCommand) handleKeyResponse(resp *api.KeyringResponse) {
}
c.Ui.Output(formatList(out))
}

View File

@ -357,6 +357,11 @@ func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix strin
out += fmt.Sprintf("%s* Dimension %q exhausted on %d nodes\n", prefix, dim, num)
}
// Print quota info
for _, dim := range metrics.QuotaExhausted {
out += fmt.Sprintf("%s* Quota limit hit %q\n", prefix, dim)
}
// Print scores
if scores {
for name, score := range metrics.Scores {

View File

@ -5,6 +5,7 @@ import (
"strings"
"github.com/hashicorp/nomad/api"
flaghelper "github.com/hashicorp/nomad/helper/flag-helpers"
"github.com/posener/complete"
)
@ -16,8 +17,8 @@ func (c *NamespaceApplyCommand) Help() string {
helpText := `
Usage: nomad namespace apply [options] <namespace>
Apply is used to create or update a namespace. It takes the namespace name to
create or update as its only argument.
General Options:
@ -25,6 +26,9 @@ General Options:
Apply Options:
-quota
The quota to attach to the namespace.
-description
An optional description for the namespace.
`
@ -35,11 +39,12 @@ func (c *NamespaceApplyCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-description": complete.PredictAnything,
"-quota": QuotaPredictor(c.Meta.Client),
})
}
func (c *NamespaceApplyCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
return NamespacePredictor(c.Meta.Client, nil)
}
func (c *NamespaceApplyCommand) Synopsis() string {
@ -47,11 +52,18 @@ func (c *NamespaceApplyCommand) Synopsis() string {
}
func (c *NamespaceApplyCommand) Run(args []string) int {
var description string
var description, quota *string
flags := c.Meta.FlagSet("namespace apply", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.StringVar(&description, "description", "", "")
flags.Var((flaghelper.FuncVar)(func(s string) error {
description = &s
return nil
}), "description", "")
flags.Var((flaghelper.FuncVar)(func(s string) error {
quota = &s
return nil
}), "quota", "")
if err := flags.Parse(args); err != nil {
return 1
@ -79,10 +91,25 @@ func (c *NamespaceApplyCommand) Run(args []string) int {
return 1
}
// Create the request object.
ns := &api.Namespace{
Name: name,
Description: description,
// Lookup the given namespace
ns, _, err := client.Namespaces().Info(name, nil)
if err != nil && !strings.Contains(err.Error(), "404") {
c.Ui.Error(fmt.Sprintf("Error looking up namespace: %s", err))
return 1
}
if ns == nil {
ns = &api.Namespace{
Name: name,
}
}
// Add what is set
if description != nil {
ns.Description = *description
}
if quota != nil {
ns.Quota = *quota
}
_, err = client.Namespaces().Register(ns, nil)
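With the read-modify-write flow above, repeated applies only touch the flags that were actually set, so a namespace can be created first and a quota attached later without clobbering its description. Hypothetical command lines:

nomad namespace apply -description "dev team namespace" dev
nomad namespace apply -quota dev-quota dev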

View File

@ -15,7 +15,7 @@ func (c *NamespaceDeleteCommand) Help() string {
helpText := `
Usage: nomad namespace delete [options] <namespace>
Delete is used to remove a namespace.
General Options:

View File

@ -0,0 +1,94 @@
package command
import (
"fmt"
"strings"
"github.com/posener/complete"
)
type NamespaceInspectCommand struct {
Meta
}
func (c *NamespaceInspectCommand) Help() string {
helpText := `
Usage: nomad namespace inspect [options] <namespace>
Inspect is used to view raw information about a particular namespace.
General Options:
` + generalOptionsUsage() + `
Inspect Options:
-t
Format and display the namespace using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *NamespaceInspectCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-t": complete.PredictAnything,
})
}
func (c *NamespaceInspectCommand) AutocompleteArgs() complete.Predictor {
return NamespacePredictor(c.Meta.Client, nil)
}
func (c *NamespaceInspectCommand) Synopsis() string {
return "Inspect a namespace"
}
func (c *NamespaceInspectCommand) Run(args []string) int {
var tmpl string
flags := c.Meta.FlagSet("namespace inspect", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
name := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Do a prefix lookup
ns, possible, err := getNamespace(client.Namespaces(), name)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving namespaces: %s", err))
return 1
}
if len(possible) != 0 {
c.Ui.Error(fmt.Sprintf("Prefix matched multiple namespaces\n\n%s", formatNamespaces(possible)))
return 1
}
out, err := Format(len(tmpl) == 0, tmpl, ns)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}
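As a usage note, the -t flag takes a Go template that is rendered against the api.Namespace struct; a hypothetical invocation:

nomad namespace inspect -t '{{ .Name }}: {{ .Description }}' default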

View File

@ -0,0 +1,94 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestNamespaceInspectCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &NamespaceInspectCommand{}
}
func TestNamespaceInspectCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &NamespaceInspectCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "retrieving namespace") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestNamespaceInspectCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceInspectCommand{Meta: Meta{Ui: ui}}
// Create a namespace
ns := &api.Namespace{
Name: "foo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(t, err)
// Inspect
if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out := ui.OutputWriter.String()
if !strings.Contains(out, ns.Name) {
t.Fatalf("expected namespace, got: %s", out)
}
}
func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceInspectCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a namespace
ns := &api.Namespace{
Name: "foo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(err)
args := complete.Args{Last: "f"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(ns.Name, res[0])
}

View File

@ -17,7 +17,7 @@ func (c *NamespaceListCommand) Help() string {
helpText := `
Usage: nomad namespace list [options]
List is used to list available namespaces.
General Options:

command/namespace_status.go (new file, 133 lines)
View File

@ -0,0 +1,133 @@
package command
import (
"fmt"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type NamespaceStatusCommand struct {
Meta
}
func (c *NamespaceStatusCommand) Help() string {
helpText := `
Usage: nomad namespace status [options] <namespace>
Status is used to view the status of a particular namespace.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *NamespaceStatusCommand) AutocompleteFlags() complete.Flags {
return c.Meta.AutocompleteFlags(FlagSetClient)
}
func (c *NamespaceStatusCommand) AutocompleteArgs() complete.Predictor {
return NamespacePredictor(c.Meta.Client, nil)
}
func (c *NamespaceStatusCommand) Synopsis() string {
return "Display a namespace's status"
}
func (c *NamespaceStatusCommand) Run(args []string) int {
flags := c.Meta.FlagSet("namespace status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
name := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Do a prefix lookup
ns, possible, err := getNamespace(client.Namespaces(), name)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving namespaces: %s", err))
return 1
}
if len(possible) != 0 {
c.Ui.Error(fmt.Sprintf("Prefix matched multiple namespaces\n\n%s", formatNamespaces(possible)))
return 1
}
c.Ui.Output(formatNamespaceBasics(ns))
if ns.Quota != "" {
quotas := client.Quotas()
spec, _, err := quotas.Info(ns.Quota, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving quota spec: %s", err))
return 1
}
// Get the quota usages
usages, failures := quotaUsages(spec, quotas)
// Format the limits
c.Ui.Output(c.Colorize().Color("\n[bold]Quota Limits[reset]"))
c.Ui.Output(formatQuotaLimits(spec, usages))
// Display any failures
if len(failures) != 0 {
c.Ui.Error(c.Colorize().Color("\n[bold][red]Lookup Failures[reset]"))
for region, failure := range failures {
c.Ui.Error(fmt.Sprintf(" * Failed to retrieve quota usage for region %q: %v", region, failure))
}
return 1
}
}
return 0
}
// formatNamespaceBasics formats the basic information of the namespace
func formatNamespaceBasics(ns *api.Namespace) string {
basic := []string{
fmt.Sprintf("Name|%s", ns.Name),
fmt.Sprintf("Description|%s", ns.Description),
fmt.Sprintf("Quota|%s", ns.Quota),
}
return formatKV(basic)
}
func getNamespace(client *api.Namespaces, ns string) (match *api.Namespace, possible []*api.Namespace, err error) {
// Do a prefix lookup
namespaces, _, err := client.PrefixList(ns, nil)
if err != nil {
return nil, nil, err
}
l := len(namespaces)
switch {
case l == 0:
return nil, nil, fmt.Errorf("Namespace %q matched no namespaces", ns)
case l == 1:
return namespaces[0], nil, nil
default:
return nil, namespaces, nil
}
}

View File

@ -0,0 +1,135 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestNamespaceStatusCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &NamespaceStatusCommand{}
}
func TestNamespaceStatusCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "retrieving namespace") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestNamespaceStatusCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}}
// Create a namespace
ns := &api.Namespace{
Name: "foo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(t, err)
// Check status on namespace
if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
// Check for basic spec
out := ui.OutputWriter.String()
if !strings.Contains(out, "= foo") {
t.Fatalf("expected quota, got: %s", out)
}
}
func TestNamespaceStatusCommand_Good_Quota(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui}}
// Create a quota to delete
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(t, err)
// Create a namespace
ns := &api.Namespace{
Name: "foo",
Quota: qs.Name,
}
_, err = client.Namespaces().Register(ns, nil)
assert.Nil(t, err)
// Check status on namespace
if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
// Check for basic spec
out := ui.OutputWriter.String()
if !strings.Contains(out, "= foo") {
t.Fatalf("expected quota, got: %s", out)
}
// Check for usage
if !strings.Contains(out, "0 / 100") {
t.Fatalf("expected quota, got: %s", out)
}
}
func TestNamespaceStatusCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a namespace
ns := &api.Namespace{
Name: "foo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(err)
args := complete.Args{Last: "f"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(ns.Name, res[0])
}

View File

@ -14,9 +14,9 @@ func (c *OperatorRaftCommand) Help() string {
helpText := `
Usage: nomad operator raft <subcommand> [options]
The Raft operator command is used to interact with Nomad's Raft subsystem. The
command can be used to verify Raft peers or in rare cases to recover quorum by
removing invalid peers.
`
return strings.TrimSpace(helpText)
}

View File

@ -17,7 +17,7 @@ func (c *OperatorRaftListCommand) Help() string {
helpText := `
Usage: nomad operator raft list-peers [options]
Displays the current Raft peer configuration.
General Options:

View File

@ -16,14 +16,14 @@ func (c *OperatorRaftRemoveCommand) Help() string {
helpText := `
Usage: nomad operator raft remove-peer [options]
Remove the Nomad server with the given -peer-address from the Raft configuration.
There are rare cases where a peer may be left behind in the Raft quorum even
though the server is no longer present and known to the cluster. This command
can be used to remove the failed server so that it no longer affects the Raft
quorum. If the server still shows in the output of the "nomad server-members"
command, it is preferable to clean up by simply running "nomad
server-force-leave" instead of this command.
General Options:

command/quota.go (new file, 39 lines)
View File

@ -0,0 +1,39 @@
package command
import (
"github.com/hashicorp/nomad/api/contexts"
"github.com/mitchellh/cli"
"github.com/posener/complete"
)
type QuotaCommand struct {
Meta
}
func (f *QuotaCommand) Help() string {
return "This command is accessed by using one of the subcommands below."
}
func (f *QuotaCommand) Synopsis() string {
return "Interact with quotas"
}
func (f *QuotaCommand) Run(args []string) int {
return cli.RunResultHelp
}
// QuotaPredictor returns a quota predictor
func QuotaPredictor(factory ApiClientFactory) complete.Predictor {
return complete.PredictFunc(func(a complete.Args) []string {
client, err := factory()
if err != nil {
return nil
}
resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Quotas, nil)
if err != nil {
return []string{}
}
return resp.Matches[contexts.Quotas]
})
}

command/quota_apply.go (new file, 276 lines)
View File

@ -0,0 +1,276 @@
package command
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/mitchellh/mapstructure"
"github.com/posener/complete"
)
type QuotaApplyCommand struct {
Meta
}
func (c *QuotaApplyCommand) Help() string {
helpText := `
Usage: nomad quota apply [options] <input>
Apply is used to create or update a quota specification. The specification file
will be read from stdin by specifying "-", otherwise a path to the file is
expected.
General Options:
` + generalOptionsUsage() + `
Apply Options:
-json
Parse the input as a JSON quota specification.
`
return strings.TrimSpace(helpText)
}
func (c *QuotaApplyCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-json": complete.PredictNothing,
})
}
func (c *QuotaApplyCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictFiles("*")
}
func (c *QuotaApplyCommand) Synopsis() string {
return "Create or update a quota specification"
}
func (c *QuotaApplyCommand) Run(args []string) int {
var jsonInput bool
flags := c.Meta.FlagSet("quota apply", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&jsonInput, "json", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we get exactly one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
// Read the file contents
file := args[0]
var rawQuota []byte
var err error
if file == "-" {
rawQuota, err = ioutil.ReadAll(os.Stdin)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to read stdin: %v", err))
return 1
}
} else {
rawQuota, err = ioutil.ReadFile(file)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to read file: %v", err))
return 1
}
}
var spec *api.QuotaSpec
if jsonInput {
var jsonSpec api.QuotaSpec
dec := json.NewDecoder(bytes.NewBuffer(rawQuota))
if err := dec.Decode(&jsonSpec); err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse quota: %v", err))
return 1
}
spec = &jsonSpec
} else {
hclSpec, err := parseQuotaSpec(rawQuota)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing quota specification: %s", err))
return 1
}
spec = hclSpec
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
_, err = client.Quotas().Register(spec, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error applying quota specification: %s", err))
return 1
}
c.Ui.Output(fmt.Sprintf("Successfully applied quota specification %q!", spec.Name))
return 0
}
// parseQuotaSpec is used to parse the quota specification from HCL
func parseQuotaSpec(input []byte) (*api.QuotaSpec, error) {
root, err := hcl.ParseBytes(input)
if err != nil {
return nil, err
}
// Top-level item should be a list
list, ok := root.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: root should be an object")
}
var spec api.QuotaSpec
if err := parseQuotaSpecImpl(&spec, list); err != nil {
return nil, err
}
return &spec, nil
}
// parseQuotaSpecImpl parses the quota spec taking as input the AST tree
func parseQuotaSpecImpl(result *api.QuotaSpec, list *ast.ObjectList) error {
// Check for invalid keys
valid := []string{
"name",
"description",
"limit",
}
if err := helper.CheckHCLKeys(list, valid); err != nil {
return err
}
// Decode the full thing into a map[string]interface for ease
var m map[string]interface{}
if err := hcl.DecodeObject(&m, list); err != nil {
return err
}
// Manually parse
delete(m, "limit")
// Decode the rest
if err := mapstructure.WeakDecode(m, result); err != nil {
return err
}
// Parse limits
if o := list.Filter("limit"); len(o.Items) > 0 {
if err := parseQuotaLimits(&result.Limits, o); err != nil {
return multierror.Prefix(err, "limit ->")
}
}
return nil
}
// parseQuotaLimits parses the quota limits
func parseQuotaLimits(result *[]*api.QuotaLimit, list *ast.ObjectList) error {
for _, o := range list.Elem().Items {
// Check for invalid keys
valid := []string{
"region",
"region_limit",
}
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
// Manually parse
delete(m, "region_limit")
// Decode the rest
var limit api.QuotaLimit
if err := mapstructure.WeakDecode(m, &limit); err != nil {
return err
}
// We need this later
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("limit should be an object")
}
// Parse limits
if o := listVal.Filter("region_limit"); len(o.Items) > 0 {
limit.RegionLimit = new(api.Resources)
if err := parseQuotaResource(limit.RegionLimit, o); err != nil {
return multierror.Prefix(err, "region_limit ->")
}
}
*result = append(*result, &limit)
}
return nil
}
// parseQuotaResource parses the region_limit resources
func parseQuotaResource(result *api.Resources, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) == 0 {
return nil
}
if len(list.Items) > 1 {
return fmt.Errorf("only one 'region_limit' block allowed per limit")
}
// Get our resource object
o := list.Items[0]
// We need this later
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("resource: should be an object")
}
// Check for invalid keys
valid := []string{
"cpu",
"memory",
}
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "resources ->")
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, result); err != nil {
return err
}
return nil
}
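
To make the accepted HCL shape concrete, here is a test-style sketch that drives parseQuotaSpec end to end. It assumes it sits in the same package as the parsers above; the values are illustrative.

func TestParseQuotaSpec_Sketch(t *testing.T) {
	input := []byte(`
name        = "dev"
description = "developer sandbox"

limit {
    region = "global"
    region_limit {
        cpu    = 500
        memory = 256
    }
}
`)

	spec, err := parseQuotaSpec(input)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// name and description decode via mapstructure; the limit and
	// region_limit blocks are parsed manually into Limits/RegionLimit.
	if spec.Name != "dev" || len(spec.Limits) != 1 {
		t.Fatalf("bad spec: %#v", spec)
	}
	if *spec.Limits[0].RegionLimit.CPU != 500 || *spec.Limits[0].RegionLimit.MemoryMB != 256 {
		t.Fatalf("bad region limit: %#v", spec.Limits[0].RegionLimit)
	}
}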

View File

@ -0,0 +1,99 @@
// +build ent
package command
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/assert"
)
func TestQuotaApplyCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaApplyCommand{}
}
func TestQuotaApplyCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaApplyCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("name required error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestQuotaApplyCommand_Good_HCL(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaApplyCommand{Meta: Meta{Ui: ui}}
fh1, err := ioutil.TempFile("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(fh1.Name())
if _, err := fh1.WriteString(defaultHclQuotaSpec); err != nil {
t.Fatalf("err: %s", err)
}
// Create a quota spec
if code := cmd.Run([]string{"-address=" + url, fh1.Name()}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
quotas, _, err := client.Quotas().List(nil)
assert.Nil(t, err)
assert.Len(t, quotas, 1)
}
func TestQuotaApplyCommand_Good_JSON(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaApplyCommand{Meta: Meta{Ui: ui}}
fh1, err := ioutil.TempFile("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Remove(fh1.Name())
if _, err := fh1.WriteString(defaultJsonQuotaSpec); err != nil {
t.Fatalf("err: %s", err)
}
// Create a quota spec
if code := cmd.Run([]string{"-address=" + url, "-json", fh1.Name()}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
quotas, _, err := client.Quotas().List(nil)
assert.Nil(t, err)
assert.Len(t, quotas, 1)
}

command/quota_delete.go Normal file
View File

@ -0,0 +1,71 @@
package command
import (
"fmt"
"strings"
"github.com/posener/complete"
)
type QuotaDeleteCommand struct {
Meta
}
func (c *QuotaDeleteCommand) Help() string {
helpText := `
Usage: nomad quota delete [options] <quota>
Delete is used to remove a quota.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *QuotaDeleteCommand) AutocompleteFlags() complete.Flags {
return c.Meta.AutocompleteFlags(FlagSetClient)
}
func (c *QuotaDeleteCommand) AutocompleteArgs() complete.Predictor {
return QuotaPredictor(c.Meta.Client)
}
func (c *QuotaDeleteCommand) Synopsis() string {
return "Delete a quota specification"
}
func (c *QuotaDeleteCommand) Run(args []string) int {
flags := c.Meta.FlagSet("quota delete", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
name := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
_, err = client.Quotas().Delete(name, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deleting quota: %s", err))
return 1
}
c.Ui.Output(fmt.Sprintf("Successfully deleted quota %q!", name))
return 0
}

View File

@ -0,0 +1,105 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/helper"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestQuotaDeleteCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaDeleteCommand{}
}
func TestQuotaDeleteCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaDeleteCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "deleting quota") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestQuotaDeleteCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaDeleteCommand{Meta: Meta{Ui: ui}}
// Create a quota to delete
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(t, err)
// Delete the quota
if code := cmd.Run([]string{"-address=" + url, qs.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
quotas, _, err := client.Quotas().List(nil)
assert.Nil(t, err)
assert.Len(t, quotas, 0)
}
func TestQuotaDeleteCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaDeleteCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a quota
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(err)
args := complete.Args{Last: "t"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(qs.Name, res[0])
}
// testQuotaSpec returns a test quota specification
func testQuotaSpec() *api.QuotaSpec {
return &api.QuotaSpec{
Name: "test",
Limits: []*api.QuotaLimit{
{
Region: "global",
RegionLimit: &api.Resources{
CPU: helper.IntToPtr(100),
},
},
},
}
}

command/quota_init.go Normal file
View File

@ -0,0 +1,133 @@
package command
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/posener/complete"
)
const (
// DefaultHclQuotaInitName is the default name we use when initializing the
// example quota file in HCL format
DefaultHclQuotaInitName = "spec.hcl"
// DefaultJsonQuotaInitName is the default name we use when initializing the
// example quota file in JSON format
DefaultJsonQuotaInitName = "spec.json"
)
// QuotaInitCommand generates a new quota spec that you can customize to your
// liking, like vagrant init
type QuotaInitCommand struct {
Meta
}
func (c *QuotaInitCommand) Help() string {
helpText := `
Usage: nomad quota init
Creates an example quota specification file that can be used as a starting
point to customize further.
Init Options:
-json
Create an example JSON quota specification.
`
return strings.TrimSpace(helpText)
}
func (c *QuotaInitCommand) Synopsis() string {
return "Create an example quota specification file"
}
func (c *QuotaInitCommand) AutocompleteFlags() complete.Flags {
return complete.Flags{
"-json": complete.PredictNothing,
}
}
func (c *QuotaInitCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
}
func (c *QuotaInitCommand) Run(args []string) int {
var jsonOutput bool
flags := c.Meta.FlagSet("quota init", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&jsonOutput, "json", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we get no arguments
args = flags.Args()
if l := len(args); l != 0 {
c.Ui.Error(c.Help())
return 1
}
fileName := DefaultHclQuotaInitName
fileContent := defaultHclQuotaSpec
if jsonOutput {
fileName = DefaultJsonQuotaInitName
fileContent = defaultJsonQuotaSpec
}
// Check if the file already exists
_, err := os.Stat(fileName)
if err != nil && !os.IsNotExist(err) {
c.Ui.Error(fmt.Sprintf("Failed to stat %q: %v", fileName, err))
return 1
}
if !os.IsNotExist(err) {
c.Ui.Error(fmt.Sprintf("Quota specification %q already exists", fileName))
return 1
}
// Write out the example
err = ioutil.WriteFile(fileName, []byte(fileContent), 0660)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to write %q: %v", fileName, err))
return 1
}
// Success
c.Ui.Output(fmt.Sprintf("Example quota specification written to %s", fileName))
return 0
}
var defaultHclQuotaSpec = strings.TrimSpace(`
name = "default-quota"
description = "Limit the shared default namespace"
# Create a limit for the global region. Additional limits may
# be specified in order to limit other regions.
limit {
region = "global"
region_limit {
cpu = 2500
memory = 1000
}
}
`)
var defaultJsonQuotaSpec = strings.TrimSpace(`
{
"Name": "default-quota",
"Description": "Limit the shared default namespace",
"Limits": [
{
"Region": "global",
"RegionLimit": {
"CPU": 2500,
"MemoryMB": 1000
}
}
]
}
`)
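
Note that the JSON example uses the api.QuotaSpec field names verbatim, so it decodes with the standard library exactly the way "quota apply -json" consumes user input. A quick sketch:

// Sketch: round-trip the JSON example into the API struct.
var spec api.QuotaSpec
if err := json.Unmarshal([]byte(defaultJsonQuotaSpec), &spec); err != nil {
	panic(err) // would mean the example drifted from api.QuotaSpec
}
// spec.Name == "default-quota"
// *spec.Limits[0].RegionLimit.MemoryMB == 1000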

command/quota_init_test.go Normal file
View File

@ -0,0 +1,119 @@
package command
import (
"io/ioutil"
"os"
"strings"
"testing"
"github.com/mitchellh/cli"
)
func TestQuotaInitCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaInitCommand{}
}
func TestQuotaInitCommand_Run_HCL(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expect exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expect help output, got: %s", out)
}
ui.ErrorWriter.Reset()
// Ensure we change the cwd back
origDir, err := os.Getwd()
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Chdir(origDir)
// Create a temp dir and change into it
dir, err := ioutil.TempDir("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.RemoveAll(dir)
if err := os.Chdir(dir); err != nil {
t.Fatalf("err: %s", err)
}
// Works if the file doesn't exist
if code := cmd.Run([]string{}); code != 0 {
t.Fatalf("expect exit code 0, got: %d", code)
}
content, err := ioutil.ReadFile(DefaultHclQuotaInitName)
if err != nil {
t.Fatalf("err: %s", err)
}
if string(content) != defaultHclQuotaSpec {
t.Fatalf("unexpected file content\n\n%s", string(content))
}
// Fails if the file exists
if code := cmd.Run([]string{}); code != 1 {
t.Fatalf("expect exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "exists") {
t.Fatalf("expect file exists error, got: %s", out)
}
}
func TestQuotaInitCommand_Run_JSON(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaInitCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expect exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expect help output, got: %s", out)
}
ui.ErrorWriter.Reset()
// Ensure we change the cwd back
origDir, err := os.Getwd()
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.Chdir(origDir)
// Create a temp dir and change into it
dir, err := ioutil.TempDir("", "nomad")
if err != nil {
t.Fatalf("err: %s", err)
}
defer os.RemoveAll(dir)
if err := os.Chdir(dir); err != nil {
t.Fatalf("err: %s", err)
}
// Works if the file doesn't exist
if code := cmd.Run([]string{"-json"}); code != 0 {
t.Fatalf("expect exit code 0, got: %d", code)
}
content, err := ioutil.ReadFile(DefaultJsonQuotaInitName)
if err != nil {
t.Fatalf("err: %s", err)
}
if string(content) != defaultJsonQuotaSpec {
t.Fatalf("unexpected file content\n\n%s", string(content))
}
// Fails if the file exists
if code := cmd.Run([]string{"-json"}); code != 1 {
t.Fatalf("expect exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "exists") {
t.Fatalf("expect file exists error, got: %s", out)
}
}

command/quota_inspect.go Normal file
View File

@ -0,0 +1,116 @@
package command
import (
"fmt"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type QuotaInspectCommand struct {
Meta
}
type inspectedQuota struct {
Spec *api.QuotaSpec
Usages map[string]*api.QuotaUsage
Failures map[string]string `json:"UsageLookupErrors"`
}
func (c *QuotaInspectCommand) Help() string {
helpText := `
Usage: nomad quota inspect [options] <quota>
Inspect is used to view raw information about a particular quota.
General Options:
` + generalOptionsUsage() + `
Inspect Options:
-t
Format and display the quota using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *QuotaInspectCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-t": complete.PredictAnything,
})
}
func (c *QuotaInspectCommand) AutocompleteArgs() complete.Predictor {
return QuotaPredictor(c.Meta.Client)
}
func (c *QuotaInspectCommand) Synopsis() string {
return "Inspect a quota specification"
}
func (c *QuotaInspectCommand) Run(args []string) int {
var tmpl string
flags := c.Meta.FlagSet("quota inspect", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
name := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Do a prefix lookup
quotas := client.Quotas()
spec, possible, err := getQuota(quotas, name)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving quota: %s", err))
return 1
}
if len(possible) != 0 {
c.Ui.Error(fmt.Sprintf("Prefix matched multiple quotas\n\n%s", formatQuotaSpecs(possible)))
return 1
}
// Get the quota usages
usages, failures := quotaUsages(spec, quotas)
failuresConverted := make(map[string]string, len(failures))
for r, e := range failures {
failuresConverted[r] = e.Error()
}
data := &inspectedQuota{
Spec: spec,
Usages: usages,
Failures: failuresConverted,
}
out, err := Format(len(tmpl) == 0, tmpl, data)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}

View File

@ -0,0 +1,89 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestQuotaInspectCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaInspectCommand{}
}
func TestQuotaInspectCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaInspectCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "retrieving quota") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestQuotaInspectCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaInspectCommand{Meta: Meta{Ui: ui}}
// Create a quota to inspect
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(t, err)
// Inspect the quota
if code := cmd.Run([]string{"-address=" + url, qs.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out := ui.OutputWriter.String()
if !strings.Contains(out, "Usages") || !strings.Contains(out, qs.Name) {
t.Fatalf("expected quota, got: %s", out)
}
}
func TestQuotaInspectCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaInspectCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a quota
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(err)
args := complete.Args{Last: "t"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(qs.Name, res[0])
}

command/quota_list.go Normal file
View File

@ -0,0 +1,117 @@
package command
import (
"fmt"
"sort"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type QuotaListCommand struct {
Meta
}
func (c *QuotaListCommand) Help() string {
helpText := `
Usage: nomad quota list [options]
List is used to list available quotas.
General Options:
` + generalOptionsUsage() + `
List Options:
-json
Output the quotas in JSON format.
-t
Format and display the quotas using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *QuotaListCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-json": complete.PredictNothing,
"-t": complete.PredictAnything,
})
}
func (c *QuotaListCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
}
func (c *QuotaListCommand) Synopsis() string {
return "List quota specifications"
}
func (c *QuotaListCommand) Run(args []string) int {
var json bool
var tmpl string
flags := c.Meta.FlagSet("quota list", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&json, "json", false, "")
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got no arguments
args = flags.Args()
if l := len(args); l != 0 {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
quotas, _, err := client.Quotas().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving quotas: %s", err))
return 1
}
if json || len(tmpl) > 0 {
out, err := Format(json, tmpl, quotas)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}
c.Ui.Output(formatQuotaSpecs(quotas))
return 0
}
func formatQuotaSpecs(quotas []*api.QuotaSpec) string {
if len(quotas) == 0 {
return "No quotas found"
}
// Sort the output by quota name
sort.Slice(quotas, func(i, j int) bool { return quotas[i].Name < quotas[j].Name })
rows := make([]string, len(quotas)+1)
rows[0] = "Name|Description"
for i, qs := range quotas {
rows[i+1] = fmt.Sprintf("%s|%s",
qs.Name,
qs.Description)
}
return formatList(rows)
}

View File

@ -0,0 +1,77 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/assert"
)
func TestQuotaListCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaListCommand{}
}
func TestQuotaListCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaListCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error retrieving quotas") {
t.Fatalf("expected failed query error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestQuotaListCommand_List(t *testing.T) {
t.Parallel()
assert := assert.New(t)
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaListCommand{Meta: Meta{Ui: ui}}
// Create a quota
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(err)
// List should contain the new quota
if code := cmd.Run([]string{"-address=" + url}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out := ui.OutputWriter.String()
if !strings.Contains(out, qs.Name) || !strings.Contains(out, qs.Description) {
t.Fatalf("expected quota, got: %s", out)
}
ui.OutputWriter.Reset()
// List json
t.Log(url)
if code := cmd.Run([]string{"-address=" + url, "-json"}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out = ui.OutputWriter.String()
if !strings.Contains(out, "CreateIndex") {
t.Fatalf("expected json output, got: %s", out)
}
ui.OutputWriter.Reset()
}

command/quota_status.go Normal file
View File

@ -0,0 +1,220 @@
package command
import (
"encoding/base64"
"fmt"
"sort"
"strconv"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type QuotaStatusCommand struct {
Meta
}
func (c *QuotaStatusCommand) Help() string {
helpText := `
Usage: nomad quota status [options] <quota>
Status is used to view the status of a particular quota.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *QuotaStatusCommand) AutocompleteFlags() complete.Flags {
return c.Meta.AutocompleteFlags(FlagSetClient)
}
func (c *QuotaStatusCommand) AutocompleteArgs() complete.Predictor {
return QuotaPredictor(c.Meta.Client)
}
func (c *QuotaStatusCommand) Synopsis() string {
return "Display a quota's status and current usage"
}
func (c *QuotaStatusCommand) Run(args []string) int {
flags := c.Meta.FlagSet("quota status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
name := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Do a prefix lookup
quotas := client.Quotas()
spec, possible, err := getQuota(quotas, name)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving quota: %s", err))
return 1
}
if len(possible) != 0 {
c.Ui.Error(fmt.Sprintf("Prefix matched multiple quotas\n\n%s", formatQuotaSpecs(possible)))
return 1
}
// Format the basics
c.Ui.Output(formatQuotaSpecBasics(spec))
// Get the quota usages
usages, failures := quotaUsages(spec, quotas)
// Format the limits
c.Ui.Output(c.Colorize().Color("\n[bold]Quota Limits[reset]"))
c.Ui.Output(formatQuotaLimits(spec, usages))
// Display any failures
if len(failures) != 0 {
c.Ui.Error(c.Colorize().Color("\n[bold][red]Lookup Failures[reset]"))
for region, failure := range failures {
c.Ui.Error(fmt.Sprintf(" * Failed to retrieve quota usage for region %q: %v", region, failure))
}
return 1
}
return 0
}
// quotaUsages returns the quota usages for the limits described by the spec. It
// will make a request to each referenced Nomad region. If the region couldn't
// be contacted, the error will be stored in the failures map
func quotaUsages(spec *api.QuotaSpec, client *api.Quotas) (usages map[string]*api.QuotaUsage, failures map[string]error) {
// Determine the regions we have limits for
regions := make(map[string]struct{})
for _, limit := range spec.Limits {
regions[limit.Region] = struct{}{}
}
usages = make(map[string]*api.QuotaUsage, len(regions))
failures = make(map[string]error)
q := api.QueryOptions{}
// Retrieve the usage per region
for region := range regions {
q.Region = region
usage, _, err := client.Usage(spec.Name, &q)
if err != nil {
failures[region] = err
continue
}
usages[region] = usage
}
return usages, failures
}
// formatQuotaSpecBasics formats the basic information of the quota
// specification.
func formatQuotaSpecBasics(spec *api.QuotaSpec) string {
basic := []string{
fmt.Sprintf("Name|%s", spec.Name),
fmt.Sprintf("Description|%s", spec.Description),
fmt.Sprintf("Limits|%d", len(spec.Limits)),
}
return formatKV(basic)
}
// formatQuotaLimits formats each quota limit to display current usage versus
// the limit. It takes as input the specification as well as quota usage by
// region. The formatter handles missing usages.
func formatQuotaLimits(spec *api.QuotaSpec, usages map[string]*api.QuotaUsage) string {
if len(spec.Limits) == 0 {
return "No quota limits defined"
}
// Sort the limits
sort.Sort(api.QuotaLimitSort(spec.Limits))
limits := make([]string, len(spec.Limits)+1)
limits[0] = "Region|CPU Usage|Memory Usage"
i := 0
for _, specLimit := range spec.Limits {
i++
// lookupUsage returns the region's quota usage for the limit
lookupUsage := func() (*api.QuotaLimit, bool) {
usage, ok := usages[specLimit.Region]
if !ok {
return nil, false
}
used, ok := usage.Used[base64.StdEncoding.EncodeToString(specLimit.Hash)]
return used, ok
}
used, ok := lookupUsage()
if !ok {
cpu := fmt.Sprintf("- / %s", formatQuotaLimitInt(specLimit.RegionLimit.CPU))
memory := fmt.Sprintf("- / %s", formatQuotaLimitInt(specLimit.RegionLimit.MemoryMB))
limits[i] = fmt.Sprintf("%s|%s|%s", specLimit.Region, cpu, memory)
continue
}
cpu := fmt.Sprintf("%d / %s", *used.RegionLimit.CPU, formatQuotaLimitInt(specLimit.RegionLimit.CPU))
memory := fmt.Sprintf("%d / %s", *used.RegionLimit.MemoryMB, formatQuotaLimitInt(specLimit.RegionLimit.MemoryMB))
limits[i] = fmt.Sprintf("%s|%s|%s", specLimit.Region, cpu, memory)
}
return formatList(limits)
}
// formatQuotaLimitInt takes a integer resource value and returns the
// appropriate string for output.
func formatQuotaLimitInt(value *int) string {
if value == nil {
return "-"
}
v := *value
if v < 0 {
return "0"
} else if v == 0 {
return "inf"
}
return strconv.Itoa(v)
}
func getQuota(client *api.Quotas, quota string) (match *api.QuotaSpec, possible []*api.QuotaSpec, err error) {
// Do a prefix lookup
quotas, _, err := client.PrefixList(quota, nil)
if err != nil {
return nil, nil, err
}
l := len(quotas)
switch {
case l == 0:
return nil, nil, fmt.Errorf("Quota %q matched no quotas", quota)
case l == 1:
return quotas[0], nil, nil
default:
return nil, quotas, nil
}
}
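
The formatQuotaLimitInt convention above is easy to misread, so spelling it out: nil renders as "-", a negative value as "0", and zero as "inf" (that is: no limit set, nothing allowed, and unlimited, respectively, reading off the code). A sketch of the mapping:

// Sketch: how region limits render in the status table.
neg, zero, some := -1, 0, 4096
fmt.Println(formatQuotaLimitInt(nil))   // "-"    (limit not set)
fmt.Println(formatQuotaLimitInt(&neg))  // "0"    (negative: nothing allowed)
fmt.Println(formatQuotaLimitInt(&zero)) // "inf"  (zero: unlimited)
fmt.Println(formatQuotaLimitInt(&some)) // "4096"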

View File

@ -0,0 +1,95 @@
// +build ent
package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestQuotaStatusCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &QuotaStatusCommand{}
}
func TestQuotaStatusCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &QuotaStatusCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "retrieving quota") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestQuotaStatusCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaStatusCommand{Meta: Meta{Ui: ui}}
// Create a quota to view
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(t, err)
// Check the quota status
if code := cmd.Run([]string{"-address=" + url, qs.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
// Check for basic spec
out := ui.OutputWriter.String()
if !strings.Contains(out, "= test") {
t.Fatalf("expected quota, got: %s", out)
}
// Check for usage
if !strings.Contains(out, "0 / 100") {
t.Fatalf("expected quota, got: %s", out)
}
}
func TestQuotaStatusCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &QuotaStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a quota
qs := testQuotaSpec()
_, err := client.Quotas().Register(qs, nil)
assert.Nil(err)
args := complete.Args{Last: "t"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(qs.Name, res[0])
}

View File

@ -18,9 +18,9 @@ func (c *SentinelApplyCommand) Help() string {
helpText := `
Usage: nomad sentinel apply [options] <name> <file>
Apply is used to write a new Sentinel policy or update an existing one.
The name of the policy and file must be specified. The file will be read
from stdin by specifying "-".
Apply is used to write a new Sentinel policy or update an existing one.
The name of the policy and file must be specified. The file will be read
from stdin by specifying "-".
General Options:

View File

@ -15,7 +15,7 @@ func (c *SentinelDeleteCommand) Help() string {
helpText := `
Usage: nomad sentinel delete [options] <name>
Delete is used to delete an existing Sentinel policy.
Delete is used to delete an existing Sentinel policy.
General Options:

View File

@ -15,7 +15,7 @@ func (c *SentinelListCommand) Help() string {
helpText := `
Usage: nomad sentinel list [options]
List is used to display all the installed Sentinel policies.
List is used to display all the installed Sentinel policies.
General Options:

View File

@ -15,7 +15,7 @@ func (c *SentinelReadCommand) Help() string {
helpText := `
Usage: nomad sentinel read [options] <name>
Read is used to inspect a Sentinel policy.
Read is used to inspect a Sentinel policy.
General Options:

View File

@ -105,21 +105,31 @@ func (c *StatusCommand) Run(args []string) int {
}
var match contexts.Context
matchCount := 0
exactMatches := 0
for ctx, vers := range res.Matches {
if l := len(vers); l == 1 {
if len(vers) > 0 && vers[0] == id {
match = ctx
matchCount++
} else if l > 0 && vers[0] == id {
// Exact match
match = ctx
break
exactMatches++
}
}
// Only a single result should be returned, as this is a match against a full id
if matchCount > 1 || len(vers) > 1 {
c.logMultiMatchError(id, res.Matches)
return 1
if exactMatches > 1 {
c.logMultiMatchError(id, res.Matches)
return 1
} else if exactMatches == 0 {
matchCount := 0
for ctx, vers := range res.Matches {
l := len(vers)
if l == 1 {
match = ctx
matchCount++
}
// Only a single result should be returned, as this is a match against a full id
if matchCount > 1 || l > 1 {
c.logMultiMatchError(id, res.Matches)
return 1
}
}
}
@ -135,6 +145,10 @@ func (c *StatusCommand) Run(args []string) int {
cmd = &JobStatusCommand{Meta: c.Meta}
case contexts.Deployments:
cmd = &DeploymentStatusCommand{Meta: c.Meta}
case contexts.Namespaces:
cmd = &NamespaceStatusCommand{Meta: c.Meta}
case contexts.Quotas:
cmd = &QuotaStatusCommand{Meta: c.Meta}
default:
c.Ui.Error(fmt.Sprintf("Unable to resolve ID: %q", id))
return 1

View File

@ -233,11 +233,21 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
Meta: meta,
}, nil
},
"namespace inspect": func() (cli.Command, error) {
return &command.NamespaceInspectCommand{
Meta: meta,
}, nil
},
"namespace list": func() (cli.Command, error) {
return &command.NamespaceListCommand{
Meta: meta,
}, nil
},
"namespace status": func() (cli.Command, error) {
return &command.NamespaceStatusCommand{
Meta: meta,
}, nil
},
"node-drain": func() (cli.Command, error) {
return &command.NodeDrainCommand{
Meta: meta,
@ -279,6 +289,48 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
}, nil
},
"quota": func() (cli.Command, error) {
return &command.QuotaCommand{
Meta: meta,
}, nil
},
"quota apply": func() (cli.Command, error) {
return &command.QuotaApplyCommand{
Meta: meta,
}, nil
},
"quota delete": func() (cli.Command, error) {
return &command.QuotaDeleteCommand{
Meta: meta,
}, nil
},
"quota init": func() (cli.Command, error) {
return &command.QuotaInitCommand{
Meta: meta,
}, nil
},
"quota inspect": func() (cli.Command, error) {
return &command.QuotaInspectCommand{
Meta: meta,
}, nil
},
"quota list": func() (cli.Command, error) {
return &command.QuotaListCommand{
Meta: meta,
}, nil
},
"quota status": func() (cli.Command, error) {
return &command.QuotaStatusCommand{
Meta: meta,
}, nil
},
"run": func() (cli.Command, error) {
return &command.RunCommand{
Meta: meta,

View File

@ -5,6 +5,9 @@ import (
"fmt"
"regexp"
"time"
multierror "github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl/hcl/ast"
)
// validUUID is used to check if a given string looks like a UUID
@ -270,3 +273,31 @@ func CleanEnvVar(s string, r byte) string {
}
return string(b)
}
func CheckHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf(
"invalid key: %s", key))
}
}
return result
}
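
CheckHCLKeys accumulates unknown keys into a single multierror rather than failing on the first one. A standalone sketch (not part of the commit):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	root, err := hcl.ParseBytes([]byte(`
name        = "x"
descriptoin = "typo"
regoin      = "typo"
`))
	if err != nil {
		panic(err)
	}

	valid := []string{"name", "description", "region"}
	if err := helper.CheckHCLKeys(root.Node, valid); err != nil {
		// Reports both "invalid key: descriptoin" and "invalid key: regoin".
		fmt.Println(err)
	}
}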

View File

@ -51,7 +51,7 @@ func Parse(r io.Reader) (*api.Job, error) {
valid := []string{
"job",
}
if err := checkHCLKeys(list, valid); err != nil {
if err := helper.CheckHCLKeys(list, valid); err != nil {
return nil, err
}
@ -146,7 +146,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error {
"vault",
"vault_token",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "job:")
}
@ -276,7 +276,7 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error {
"update",
"vault",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
}
@ -391,7 +391,7 @@ func parseRestartPolicy(final **api.RestartPolicy, list *ast.ObjectList) error {
"delay",
"mode",
}
if err := checkHCLKeys(obj.Val, valid); err != nil {
if err := helper.CheckHCLKeys(obj.Val, valid); err != nil {
return err
}
@ -430,7 +430,7 @@ func parseConstraints(result *[]*api.Constraint, list *ast.ObjectList) error {
"value",
"version",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -513,7 +513,7 @@ func parseEphemeralDisk(result **api.EphemeralDisk, list *ast.ObjectList) error
"size",
"migrate",
}
if err := checkHCLKeys(obj.Val, valid); err != nil {
if err := helper.CheckHCLKeys(obj.Val, valid); err != nil {
return err
}
@ -592,7 +592,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list
"user",
"vault",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
}
@ -708,7 +708,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list
"max_files",
"max_file_size",
}
if err := checkHCLKeys(logsBlock.Val, valid); err != nil {
if err := helper.CheckHCLKeys(logsBlock.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', logs ->", n))
}
@ -764,7 +764,7 @@ func parseTasks(jobName string, taskGroupName string, result *[]*api.Task, list
valid := []string{
"file",
}
if err := checkHCLKeys(dispatchBlock.Val, valid); err != nil {
if err := helper.CheckHCLKeys(dispatchBlock.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', dispatch_payload ->", n))
}
@ -793,7 +793,7 @@ func parseArtifacts(result *[]*api.TaskArtifact, list *ast.ObjectList) error {
"mode",
"destination",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -867,7 +867,7 @@ func parseTemplates(result *[]*api.Template, list *ast.ObjectList) error {
"env",
"vault_grace",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -911,7 +911,7 @@ func parseServices(jobName string, taskGroupName string, task *api.Task, service
"check",
"address_mode",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("service (%d) ->", idx))
}
@ -980,7 +980,7 @@ func parseChecks(service *api.Service, checkObjs *ast.ObjectList) error {
"method",
"check_restart",
}
if err := checkHCLKeys(co.Val, valid); err != nil {
if err := helper.CheckHCLKeys(co.Val, valid); err != nil {
return multierror.Prefix(err, "check ->")
}
@ -1066,7 +1066,7 @@ func parseCheckRestart(cro *ast.ObjectItem) (*api.CheckRestart, error) {
"ignore_warnings",
}
if err := checkHCLKeys(cro.Val, valid); err != nil {
if err := helper.CheckHCLKeys(cro.Val, valid); err != nil {
return nil, multierror.Prefix(err, "check_restart ->")
}
@ -1119,7 +1119,7 @@ func parseResources(result *api.Resources, list *ast.ObjectList) error {
"memory",
"network",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "resources ->")
}
@ -1144,7 +1144,7 @@ func parseResources(result *api.Resources, list *ast.ObjectList) error {
"mbits",
"port",
}
if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Items[0].Val, valid); err != nil {
return multierror.Prefix(err, "resources, network ->")
}
@ -1179,7 +1179,7 @@ func parsePorts(networkObj *ast.ObjectList, nw *api.NetworkResource) error {
"mbits",
"port",
}
if err := checkHCLKeys(networkObj, valid); err != nil {
if err := helper.CheckHCLKeys(networkObj, valid); err != nil {
return err
}
@ -1241,7 +1241,7 @@ func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error {
"auto_revert",
"canary",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -1277,7 +1277,7 @@ func parsePeriodic(result **api.PeriodicConfig, list *ast.ObjectList) error {
"prohibit_overlap",
"time_zone",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -1331,7 +1331,7 @@ func parseVault(result *api.Vault, list *ast.ObjectList) error {
"change_mode",
"change_signal",
}
if err := checkHCLKeys(listVal, valid); err != nil {
if err := helper.CheckHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "vault ->")
}
@ -1367,7 +1367,7 @@ func parseParameterizedJob(result **api.ParameterizedJobConfig, list *ast.Object
"meta_required",
"meta_optional",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
if err := helper.CheckHCLKeys(o.Val, valid); err != nil {
return err
}
@ -1380,31 +1380,3 @@ func parseParameterizedJob(result **api.ParameterizedJobConfig, list *ast.Object
*result = &d
return nil
}
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf(
"invalid key: %s", key))
}
}
return result
}

View File

@ -33,7 +33,8 @@ func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
"deployment resume", "deployment fail", "deployment promote":
case "fs ls", "fs cat", "fs stat":
case "job deployments", "job dispatch", "job history", "job promote", "job revert":
case "namespace list", "namespace delete", "namespace apply":
case "namespace list", "namespace delete", "namespace apply", "namespace inspect", "namespace status":
case "quota list", "quota delete", "quota apply", "quota status", "quota inspect", "quota init":
case "operator raft", "operator raft list-peers", "operator raft remove-peer":
case "acl policy", "acl policy apply", "acl token", "acl token create":
default:

View File

@ -1,7 +1,6 @@
package nomad
import (
"os"
"testing"
lru "github.com/hashicorp/golang-lru"
@ -15,8 +14,7 @@ import (
func TestResolveACLToken(t *testing.T) {
// Create mock state store and cache
state, err := state.NewStateStore(os.Stderr)
assert.Nil(t, err)
state := state.TestStateStore(t)
cache, err := lru.New2Q(16)
assert.Nil(t, err)

View File

@ -14,6 +14,13 @@ const (
// should be large to ensure that the FSM doesn't block when calling Unblock
// as this would apply back-pressure on Raft.
unblockBuffer = 8096
// pruneInterval is the interval at which we prune objects from the
// BlockedEvals tracker
pruneInterval = 5 * time.Minute
// pruneThreshold is the threshold after which objects will be pruned.
pruneThreshold = 15 * time.Minute
)
// BlockedEvals is used to track evaluations that shouldn't be queued until a
@ -42,10 +49,10 @@ type BlockedEvals struct {
// blocked eval exists for each job. The value is the blocked evaluation ID.
jobs map[string]string
// unblockIndexes maps computed node classes to the index in which they were
// unblocked. This is used to check if an evaluation could have been
// unblocked between the time they were in the scheduler and the time they
// are being blocked.
// unblockIndexes maps computed node classes or quota names to the index in
// which they were unblocked. This is used to check if an evaluation could
// have been unblocked between the time they were in the scheduler and the
// time they are being blocked.
unblockIndexes map[string]uint64
// duplicates is the set of evaluations for jobs that had pre-existing
@ -58,6 +65,10 @@ type BlockedEvals struct {
// duplicates.
duplicateCh chan struct{}
// timetable is used to correlate indexes with their insertion time. This
// allows us to prune based on time.
timetable *TimeTable
// stopCh is used to stop any created goroutines.
stopCh chan struct{}
}
@ -65,6 +76,7 @@ type BlockedEvals struct {
// capacityUpdate stores unblock data.
type capacityUpdate struct {
computedClass string
quotaChange string
index uint64
}
@ -82,6 +94,10 @@ type BlockedStats struct {
// TotalBlocked is the total number of blocked evaluations.
TotalBlocked int
// TotalQuotaLimit is the total number of blocked evaluations that are due
// to the quota limit being reached.
TotalQuotaLimit int
}
// NewBlockedEvals creates a new blocked eval tracker that will enqueue
@ -117,6 +133,7 @@ func (b *BlockedEvals) SetEnabled(enabled bool) {
return
} else if enabled {
go b.watchCapacity()
go b.prune()
} else {
close(b.stopCh)
}
@ -127,6 +144,12 @@ func (b *BlockedEvals) SetEnabled(enabled bool) {
}
}
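// SetTimetable installs the timetable used to correlate raft indexes with
// wall-clock time; pruneUnblockIndexes relies on it to age out stale
// unblock indexes.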
func (b *BlockedEvals) SetTimetable(timetable *TimeTable) {
b.l.Lock()
b.timetable = timetable
b.l.Unlock()
}
// Block tracks the passed evaluation and enqueues it into the eval broker when
// a suitable node calls unblock.
func (b *BlockedEvals) Block(eval *structs.Evaluation) {
@ -182,8 +205,13 @@ func (b *BlockedEvals) processBlock(eval *structs.Evaluation, token string) {
}
// Mark the job as tracked.
b.stats.TotalBlocked++
b.jobs[eval.JobID] = eval.ID
b.stats.TotalBlocked++
// Track that the evaluation is being added due to reaching the quota limit
if eval.QuotaLimitReached != "" {
b.stats.TotalQuotaLimit++
}
// Wrap the evaluation, capturing its token.
wrapped := wrappedEval{
@ -213,13 +241,29 @@ func (b *BlockedEvals) processBlock(eval *structs.Evaluation, token string) {
// the lock held.
func (b *BlockedEvals) missedUnblock(eval *structs.Evaluation) bool {
var max uint64 = 0
for class, index := range b.unblockIndexes {
for id, index := range b.unblockIndexes {
// Calculate the max unblock index
if max < index {
max = index
}
elig, ok := eval.ClassEligibility[class]
// The evaluation is blocked because it has hit a quota limit, not class
// eligibility.
if eval.QuotaLimitReached != "" {
if eval.QuotaLimitReached != id {
// Not a match
continue
} else if eval.SnapshotIndex < index {
// The evaluation was processed before the quota specification was
// updated, so unblock the evaluation.
return true
}
// The evaluation was processed having seen all changes to the quota
return false
}
elig, ok := eval.ClassEligibility[id]
if !ok && eval.SnapshotIndex < index {
// The evaluation was processed and did not encounter this class
// because it was added after it was processed. Thus for correctness
@ -268,6 +312,9 @@ func (b *BlockedEvals) Untrack(jobID string) {
delete(b.jobs, w.eval.JobID)
delete(b.captured, evalID)
b.stats.TotalBlocked--
if w.eval.QuotaLimitReached != "" {
b.stats.TotalQuotaLimit--
}
}
if w, ok := b.escaped[evalID]; ok {
@ -275,6 +322,9 @@ func (b *BlockedEvals) Untrack(jobID string) {
delete(b.escaped, evalID)
b.stats.TotalEscaped--
b.stats.TotalBlocked--
if w.eval.QuotaLimitReached != "" {
b.stats.TotalQuotaLimit--
}
}
}
@ -302,6 +352,62 @@ func (b *BlockedEvals) Unblock(computedClass string, index uint64) {
}
}
// UnblockQuota causes any evaluation that could potentially make progress on a
// capacity change on the passed quota to be enqueued into the eval broker.
func (b *BlockedEvals) UnblockQuota(quota string, index uint64) {
// Nothing to do
if quota == "" {
return
}
b.l.Lock()
// Do nothing if not enabled
if !b.enabled {
b.l.Unlock()
return
}
// Store the index in which the unblock happened. We use this on subsequent
// block calls in case the evaluation was in the scheduler when a trigger
// occurred.
b.unblockIndexes[quota] = index
b.l.Unlock()
b.capacityChangeCh <- &capacityUpdate{
quotaChange: quota,
index: index,
}
}
// UnblockClassAndQuota causes any evaluation that could potentially make
// progress on a capacity change on the passed computed node class or quota to
// be enqueued into the eval broker.
func (b *BlockedEvals) UnblockClassAndQuota(class, quota string, index uint64) {
b.l.Lock()
// Do nothing if not enabled
if !b.enabled {
b.l.Unlock()
return
}
// Store the index in which the unblock happened. We use this on subsequent
// block calls in case the evaluation was in the scheduler when a trigger
// occurred.
if quota != "" {
b.unblockIndexes[quota] = index
}
b.unblockIndexes[class] = index
b.l.Unlock()
b.capacityChangeCh <- &capacityUpdate{
computedClass: class,
quotaChange: quota,
index: index,
}
}
// watchCapacity is a long lived function that watches for capacity changes in
// nodes and unblocks the correct set of evals.
func (b *BlockedEvals) watchCapacity() {
@ -310,14 +416,12 @@ func (b *BlockedEvals) watchCapacity() {
case <-b.stopCh:
return
case update := <-b.capacityChangeCh:
b.unblock(update.computedClass, update.index)
b.unblock(update.computedClass, update.quotaChange, update.index)
}
}
}
// unblock unblocks all blocked evals that could run on the passed computed node
// class.
func (b *BlockedEvals) unblock(computedClass string, index uint64) {
func (b *BlockedEvals) unblock(computedClass, quota string, index uint64) {
b.l.Lock()
defer b.l.Unlock()
@ -329,12 +433,18 @@ func (b *BlockedEvals) unblock(computedClass string, index uint64) {
// Every eval that has escaped computed node class has to be unblocked
// because any node could potentially be feasible.
numEscaped := len(b.escaped)
numQuotaLimit := 0
unblocked := make(map[*structs.Evaluation]string, lib.MaxInt(numEscaped, 4))
if numEscaped != 0 {
if numEscaped != 0 && computedClass != "" {
for id, wrapped := range b.escaped {
unblocked[wrapped.eval] = wrapped.token
delete(b.escaped, id)
delete(b.jobs, wrapped.eval.JobID)
if wrapped.eval.QuotaLimitReached != "" {
numQuotaLimit++
}
}
}
@ -344,23 +454,31 @@ func (b *BlockedEvals) unblock(computedClass string, index uint64) {
// never saw a node with the given computed class and thus needs to be
// unblocked for correctness.
for id, wrapped := range b.captured {
if elig, ok := wrapped.eval.ClassEligibility[computedClass]; ok && !elig {
if quota != "" && wrapped.eval.QuotaLimitReached != quota {
// We are unblocking based on quota and this eval doesn't match
continue
} else if elig, ok := wrapped.eval.ClassEligibility[computedClass]; ok && !elig {
// Can skip because the eval has explicitly marked the node class
// as ineligible.
continue
}
// The computed node class has never been seen by the eval so we unblock
// it.
// Unblock the evaluation because it is either for the matching quota,
// is eligible based on the computed node class, or has never seen the
// computed node class.
unblocked[wrapped.eval] = wrapped.token
delete(b.jobs, wrapped.eval.JobID)
delete(b.captured, id)
if wrapped.eval.QuotaLimitReached != "" {
numQuotaLimit++
}
}
if l := len(unblocked); l != 0 {
// Update the counters
b.stats.TotalEscaped = 0
b.stats.TotalBlocked -= l
b.stats.TotalQuotaLimit -= numQuotaLimit
// Enqueue all the unblocked evals into the broker.
b.evalBroker.EnqueueAll(unblocked)
@ -378,12 +496,16 @@ func (b *BlockedEvals) UnblockFailed() {
return
}
quotaLimit := 0
unblocked := make(map[*structs.Evaluation]string, 4)
for id, wrapped := range b.captured {
if wrapped.eval.TriggeredBy == structs.EvalTriggerMaxPlans {
unblocked[wrapped.eval] = wrapped.token
delete(b.captured, id)
delete(b.jobs, wrapped.eval.JobID)
if wrapped.eval.QuotaLimitReached != "" {
quotaLimit++
}
}
}
@ -393,11 +515,15 @@ func (b *BlockedEvals) UnblockFailed() {
delete(b.escaped, id)
delete(b.jobs, wrapped.eval.JobID)
b.stats.TotalEscaped -= 1
if wrapped.eval.QuotaLimitReached != "" {
quotaLimit++
}
}
}
if l := len(unblocked); l > 0 {
b.stats.TotalBlocked -= l
b.stats.TotalQuotaLimit -= quotaLimit
b.evalBroker.EnqueueAll(unblocked)
}
}
@ -442,9 +568,12 @@ func (b *BlockedEvals) Flush() {
// Reset the blocked eval tracker.
b.stats.TotalEscaped = 0
b.stats.TotalBlocked = 0
b.stats.TotalQuotaLimit = 0
b.captured = make(map[string]wrappedEval)
b.escaped = make(map[string]wrappedEval)
b.jobs = make(map[string]string)
b.unblockIndexes = make(map[string]uint64)
b.timetable = nil
b.duplicates = nil
b.capacityChangeCh = make(chan *capacityUpdate, unblockBuffer)
b.stopCh = make(chan struct{})
@ -462,6 +591,7 @@ func (b *BlockedEvals) Stats() *BlockedStats {
// Copy all the stats
stats.TotalEscaped = b.stats.TotalEscaped
stats.TotalBlocked = b.stats.TotalBlocked
stats.TotalQuotaLimit = b.stats.TotalQuotaLimit
return stats
}
@ -471,6 +601,7 @@ func (b *BlockedEvals) EmitStats(period time.Duration, stopCh chan struct{}) {
select {
case <-time.After(period):
stats := b.Stats()
metrics.SetGauge([]string{"nomad", "blocked_evals", "total_quota_limit"}, float32(stats.TotalQuotaLimit))
metrics.SetGauge([]string{"nomad", "blocked_evals", "total_blocked"}, float32(stats.TotalBlocked))
metrics.SetGauge([]string{"nomad", "blocked_evals", "total_escaped"}, float32(stats.TotalEscaped))
case <-stopCh:
@ -478,3 +609,38 @@ func (b *BlockedEvals) EmitStats(period time.Duration, stopCh chan struct{}) {
}
}
}
// prune is a long lived function that prunes unnecessary objects on a timer.
func (b *BlockedEvals) prune() {
ticker := time.NewTicker(pruneInterval)
defer ticker.Stop()
for {
select {
case <-b.stopCh:
return
case <-ticker.C:
b.pruneUnblockIndexes()
}
}
}
// pruneUnblockIndexes is used to prune any tracked entry that is excessively
// old. This protects against unbounded growth of the map.
func (b *BlockedEvals) pruneUnblockIndexes() {
b.l.Lock()
defer b.l.Unlock()
if b.timetable == nil {
return
}
cutoff := time.Now().UTC().Add(-1 * pruneThreshold)
oldThreshold := b.timetable.NearestIndex(cutoff)
for key, index := range b.unblockIndexes {
if index < oldThreshold {
delete(b.unblockIndexes, key)
}
}
}
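
For orientation, a sketch of how a server would wire the tracker so that pruning actually runs; the surrounding broker and timetable names are assumptions, not code from this commit.

// Sketch: enable the tracker and give it a timetable.
b := NewBlockedEvals(evalBroker)
b.SetEnabled(true)        // starts the watchCapacity and prune goroutines
b.SetTimetable(timeTable) // without this, pruneUnblockIndexes is a no-op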

View File

@ -55,6 +55,22 @@ func TestBlockedEvals_Block_SameJob(t *testing.T) {
}
}
func TestBlockedEvals_Block_Quota(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Create a blocked eval on a quota
e := mock.Eval()
e.QuotaLimitReached = "foo"
blocked.Block(e)
// Verify block tracked both the blocked and quota limit stats
bs := blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 1 {
t.Fatalf("bad: %#v", bs)
}
}
func TestBlockedEvals_Block_PriorUnblocks(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
@ -263,6 +279,78 @@ func TestBlockedEvals_UnblockUnknown(t *testing.T) {
})
}
func TestBlockedEvals_UnblockEligible_Quota(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create an eval that is blocked on a particular quota
e := mock.Eval()
e.Status = structs.EvalStatusBlocked
e.QuotaLimitReached = "foo"
blocked.Block(e)
// Verify block caused the eval to be tracked
bs := blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalQuotaLimit != 1 {
t.Fatalf("bad: %#v", bs)
}
blocked.UnblockQuota("foo", 1000)
testutil.WaitForResult(func() (bool, error) {
// Verify Unblock caused an enqueue
brokerStats := broker.Stats()
if brokerStats.TotalReady != 1 {
return false, fmt.Errorf("bad: %#v", brokerStats)
}
// Verify Unblock updates the stats
bs := blocked.Stats()
if bs.TotalBlocked != 0 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 0 {
return false, fmt.Errorf("bad: %#v", bs)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %s", err)
})
}
func TestBlockedEvals_UnblockIneligible_Quota(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Create an eval that is blocked on a specific quota
e := mock.Eval()
e.Status = structs.EvalStatusBlocked
e.QuotaLimitReached = "foo"
blocked.Block(e)
// Verify block caused the eval to be tracked
bs := blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalQuotaLimit != 1 {
t.Fatalf("bad: %#v", bs)
}
// Should do nothing
blocked.UnblockQuota("bar", 1000)
testutil.WaitForResult(func() (bool, error) {
// Verify Unblock didn't cause an enqueue
brokerStats := broker.Stats()
if brokerStats.TotalReady != 0 {
return false, fmt.Errorf("bad: %#v", brokerStats)
}
bs := blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 1 {
return false, fmt.Errorf("bad: %#v", bs)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %s", err)
})
}
func TestBlockedEvals_Reblock(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
@ -454,6 +542,42 @@ func TestBlockedEvals_Block_ImmediateUnblock_SeenClass(t *testing.T) {
})
}
// Test the block case in which the eval should be immediately unblocked since
// a quota it is using has changed
func TestBlockedEvals_Block_ImmediateUnblock_Quota(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
// Do an unblock prior to blocking
blocked.UnblockQuota("my-quota", 1000)
// Create a blocked eval on a specific quota and add it to the blocked
// tracker.
e := mock.Eval()
e.Status = structs.EvalStatusBlocked
e.QuotaLimitReached = "my-quota"
e.SnapshotIndex = 900
blocked.Block(e)
// Verify block caused the eval to be immediately unblocked
bs := blocked.Stats()
if bs.TotalBlocked != 0 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 0 {
t.Fatalf("bad: %#v", bs)
}
testutil.WaitForResult(func() (bool, error) {
// Verify Unblock caused an enqueue
brokerStats := broker.Stats()
if brokerStats.TotalReady != 1 {
return false, fmt.Errorf("bad: %#v", brokerStats)
}
return true, nil
}, func(err error) {
t.Fatalf("err: %s", err)
})
}
func TestBlockedEvals_UnblockFailed(t *testing.T) {
t.Parallel()
blocked, broker := testBlockedEvals(t)
@ -471,19 +595,25 @@ func TestBlockedEvals_UnblockFailed(t *testing.T) {
e2.ClassEligibility = map[string]bool{"v1:123": true, "v1:456": false}
blocked.Block(e2)
e3 := mock.Eval()
e3.Status = structs.EvalStatusBlocked
e3.TriggeredBy = structs.EvalTriggerMaxPlans
e3.QuotaLimitReached = "foo"
blocked.Block(e3)
// Trigger an unblock fail
blocked.UnblockFailed()
// Verify UnblockFailed caused the eval to be immediately unblocked
bs := blocked.Stats()
if bs.TotalBlocked != 0 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 0 {
t.Fatalf("bad: %#v", bs)
}
testutil.WaitForResult(func() (bool, error) {
// Verify Unblock caused an enqueue
brokerStats := broker.Stats()
if brokerStats.TotalReady != 3 {
return false, fmt.Errorf("bad: %#v", brokerStats)
}
return true, nil
@ -493,9 +623,9 @@ func TestBlockedEvals_UnblockFailed(t *testing.T) {
// Reblock an eval for the same job and check that it gets tracked.
blocked.Block(e)
bs = blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalEscaped != 1 {
t.Fatalf("bad: %#v", bs)
}
}
@ -523,3 +653,28 @@ func TestBlockedEvals_Untrack(t *testing.T) {
t.Fatalf("bad: %#v", bStats)
}
}
func TestBlockedEvals_Untrack_Quota(t *testing.T) {
t.Parallel()
blocked, _ := testBlockedEvals(t)
// Create a blocked eval and add it to the blocked tracker.
e := mock.Eval()
e.Status = structs.EvalStatusBlocked
e.QuotaLimitReached = "foo"
e.SnapshotIndex = 1000
blocked.Block(e)
// Verify block did track
bs := blocked.Stats()
if bs.TotalBlocked != 1 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 1 {
t.Fatalf("bad: %#v", bs)
}
// Untrack and verify
blocked.Untrack(e.JobID)
bs = blocked.Stats()
if bs.TotalBlocked != 0 || bs.TotalEscaped != 0 || bs.TotalQuotaLimit != 0 {
t.Fatalf("bad: %#v", bs)
}
}

View File

@ -25,16 +25,9 @@ type mockBackend struct {
}
func newMockBackend(t *testing.T) *mockBackend {
return &mockBackend{
index: 10000,
state: state.TestStateStore(t),
}
}

View File

@ -67,11 +67,13 @@ type nomadFSM struct {
evalBroker *EvalBroker
blockedEvals *BlockedEvals
periodicDispatcher *PeriodicDispatch
logger *log.Logger
state *state.StateStore
timetable *TimeTable
// config is the FSM config
config *FSMConfig
// enterpriseAppliers holds the set of enterprise only LogAppliers
enterpriseAppliers LogAppliers
@ -97,21 +99,44 @@ type nomadSnapshot struct {
type snapshotHeader struct {
}
// FSMConfig is used to configure the FSM
type FSMConfig struct {
// EvalBroker is the evaluation broker evaluations should be added to
EvalBroker *EvalBroker
// Periodic is the periodic job dispatcher that periodic jobs should be
// added/removed from
Periodic *PeriodicDispatch
// BlockedEvals is the blocked eval tracker that blocked evaluations should
// be added to.
Blocked *BlockedEvals
// LogOutput is the writer logs should be written to
LogOutput io.Writer
// Region is the region of the server embedding the FSM
Region string
}
// NewFSM is used to construct a new FSM with a blank state
func NewFSM(config *FSMConfig) (*nomadFSM, error) {
// Create a state store
sconfig := &state.StateStoreConfig{
LogOutput: config.LogOutput,
Region: config.Region,
}
state, err := state.NewStateStore(sconfig)
if err != nil {
return nil, err
}
fsm := &nomadFSM{
evalBroker: config.EvalBroker,
periodicDispatcher: config.Periodic,
blockedEvals: config.Blocked,
logger: log.New(config.LogOutput, "", log.LstdFlags),
config: config,
state: state,
timetable: NewTimeTable(timeTableGranularity, timeTableLimit),
enterpriseAppliers: make(map[structs.MessageType]LogApplier, 8),
@ -568,7 +593,15 @@ func (n *nomadFSM) applyAllocClientUpdate(buf []byte, index uint64) interface{}
return err
}
n.blockedEvals.Unblock(node.ComputedClass, index)
// Unblock any associated quota
quota, err := n.allocQuota(alloc.ID)
if err != nil {
n.logger.Printf("[ERR] nomad.fsm: looking up quota associated with alloc %q failed: %v", alloc.ID, err)
return err
}
n.blockedEvals.UnblockClassAndQuota(node.ComputedClass, quota, index)
}
}
@ -819,7 +852,11 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
defer old.Close()
// Create a new state store
config := &state.StateStoreConfig{
LogOutput: n.config.LogOutput,
Region: n.config.Region,
}
newState, err := state.NewStateStore(config)
if err != nil {
return err
}

nomad/fsm_not_ent.go Normal file
View File

@ -0,0 +1,9 @@
// +build !ent
package nomad
// allocQuota returns the quota object associated with the allocation. In
// anything but Premium this will always be empty
func (n *nomadFSM) allocQuota(allocID string) (string, error) {
return "", nil
}
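The // +build !ent tag above means this stub is compiled only into open source builds; an enterprise build supplies its own allocQuota that performs the real lookup. As a generic sketch of the pattern (file and function names hypothetical, not Nomad's), two tagged files provide alternate implementations of one function:

// feature_oss.go (hypothetical)
// +build !ent

package nomad

// quotasSupported reports whether quota enforcement is compiled in.
func quotasSupported() bool { return false }

// feature_ent.go (hypothetical)
// +build ent

package nomad

// quotasSupported reports whether quota enforcement is compiled in.
func quotasSupported() bool { return true }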

View File

@ -40,27 +40,27 @@ func (m *MockSink) Close() error {
}
func testStateStore(t *testing.T) *state.StateStore {
return state.TestStateStore(t)
}
func testFSM(t *testing.T) *nomadFSM {
broker := testBroker(t, 0)
dispatcher, _ := testPeriodicDispatcher()
fsmConfig := &FSMConfig{
EvalBroker: broker,
Periodic: dispatcher,
Blocked: NewBlockedEvals(broker),
LogOutput: os.Stderr,
Region: "global",
}
fsm, err := NewFSM(fsmConfig)
if err != nil {
t.Fatalf("err: %v", err)
}
if fsm == nil {
t.Fatalf("missing fsm")
}
state.TestInitState(t, fsm.state)
return fsm
}

View File

@ -138,6 +138,7 @@ func (s *Server) establishLeadership(stopCh chan struct{}) error {
// Enable the blocked eval tracker, since we are now the leader
s.blockedEvals.SetEnabled(true)
s.blockedEvals.SetTimetable(s.fsm.TimeTable())
// Enable the deployment watcher, since we are now the leader
if err := s.deploymentWatcher.SetEnabled(true, s.State()); err != nil {

View File

@ -3,7 +3,6 @@ package nomad
import (
"errors"
"fmt"
"os"
"testing"
"time"
@ -696,17 +695,13 @@ func TestLeader_ReplicateACLPolicies(t *testing.T) {
func TestLeader_DiffACLPolicies(t *testing.T) {
t.Parallel()
state := state.TestStateStore(t)
// Populate the local state
p1 := mock.ACLPolicy()
p2 := mock.ACLPolicy()
p3 := mock.ACLPolicy()
assert.Nil(t, state.UpsertACLPolicies(100, []*structs.ACLPolicy{p1, p2, p3}))
// Simulate a remote list
p2Stub := p2.Stub()
@ -769,10 +764,7 @@ func TestLeader_ReplicateACLTokens(t *testing.T) {
func TestLeader_DiffACLTokens(t *testing.T) {
t.Parallel()
state := state.TestStateStore(t)
// Populate the local state
p0 := mock.ACLToken()
@ -782,8 +774,7 @@ func TestLeader_DiffACLTokens(t *testing.T) {
p2.Global = true
p3 := mock.ACLToken()
p3.Global = true
assert.Nil(t, state.UpsertACLTokens(100, []*structs.ACLToken{p0, p1, p2, p3}))
// Simulate a remote list
p2Stub := p2.Stub()

View File

@ -39,6 +39,11 @@ func NodePolicy(policy string) string {
return fmt.Sprintf("node {\n\tpolicy = %q\n}\n", policy)
}
// QuotaPolicy is a helper for generating the hcl for a given quota policy.
func QuotaPolicy(policy string) string {
return fmt.Sprintf("quota {\n\tpolicy = %q\n}\n", policy)
}
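For reference, a sketch of what a caller gets back from the helper above (the rendered HCL is inferred directly from the format string), fed into the CreatePolicy helper defined just below:

rule := QuotaPolicy("read")
// rule now contains:
// quota {
//	policy = "read"
// }
CreatePolicy(t, state, 100, "quota-read", rule)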
// CreatePolicy creates a policy with the given name and rule.
func CreatePolicy(t testing.T, state StateStore, index uint64, name, rule string) {
t.Helper()

View File

@ -74,7 +74,7 @@ func (s *Server) planApply() {
if waitCh == nil || snap == nil {
snap, err = s.fsm.State().Snapshot()
if err != nil {
s.logger.Printf("[ERR] nomad.planner: failed to snapshot state: %v", err)
pending.respond(nil, err)
continue
}
@ -83,7 +83,7 @@ func (s *Server) planApply() {
// Evaluate the plan
result, err := evaluatePlan(pool, snap, pending.plan, s.logger)
if err != nil {
s.logger.Printf("[ERR] nomad.planner: failed to evaluate plan: %v", err)
pending.respond(nil, err)
continue
}
@ -100,7 +100,7 @@ func (s *Server) planApply() {
<-waitCh
snap, err = s.fsm.State().Snapshot()
if err != nil {
s.logger.Printf("[ERR] nomad.planner: failed to snapshot state: %v", err)
pending.respond(nil, err)
continue
}
@ -109,7 +109,7 @@ func (s *Server) planApply() {
// Dispatch the Raft transaction for the plan
future, err := s.applyPlan(pending.plan, result, snap)
if err != nil {
s.logger.Printf("[ERR] nomad.planner: failed to submit plan: %v", err)
pending.respond(nil, err)
continue
}
@ -176,7 +176,7 @@ func (s *Server) asyncPlanWait(waitCh chan struct{}, future raft.ApplyFuture,
// Wait for the plan to apply
if err := future.Error(); err != nil {
s.logger.Printf("[ERR] nomad.planner: failed to apply plan: %v", err)
pending.respond(nil, err)
return
}
@ -200,6 +200,30 @@ func (s *Server) asyncPlanWait(waitCh chan struct{}, future raft.ApplyFuture,
func evaluatePlan(pool *EvaluatePool, snap *state.StateSnapshot, plan *structs.Plan, logger *log.Logger) (*structs.PlanResult, error) {
defer metrics.MeasureSince([]string{"nomad", "plan", "evaluate"}, time.Now())
// Check if the plan exceeds quota
overQuota, err := evaluatePlanQuota(snap, plan)
if err != nil {
return nil, err
}
// Reject the plan and force the scheduler to refresh
if overQuota {
index, err := refreshIndex(snap)
if err != nil {
return nil, err
}
logger.Printf("[DEBUG] nomad.planner: plan for evaluation %q exceeds quota limit. Forcing refresh to %d", plan.EvalID, index)
return &structs.PlanResult{RefreshIndex: index}, nil
}
return evaluatePlanPlacements(pool, snap, plan, logger)
}
// evaluatePlanPlacements is used to determine what portions of a plan can be
// applied, if any, looking for node over-commitment. It returns whether there
// should be a plan application, which may be partial, and any error.
func evaluatePlanPlacements(pool *EvaluatePool, snap *state.StateSnapshot, plan *structs.Plan, logger *log.Logger) (*structs.PlanResult, error) {
// Create a result holder for the plan
result := &structs.PlanResult{
NodeUpdate: make(map[string][]*structs.Allocation),
@ -239,7 +263,7 @@ func evaluatePlan(pool *EvaluatePool, snap *state.StateSnapshot, plan *structs.P
if !fit {
// Log the reason why the node's allocations could not be made
if reason != "" {
logger.Printf("[DEBUG] nomad.planner: plan for node %q rejected because: %v", nodeID, reason)
}
// Set that this is a partial commit
partialCommit = true
@ -310,18 +334,14 @@ OUTER:
// a minimum refresh index to force the scheduler to work on a more
// up-to-date state to avoid the failures.
if partialCommit {
index, err := refreshIndex(snap)
if err != nil {
mErr.Errors = append(mErr.Errors, err)
}
result.RefreshIndex = index
if result.RefreshIndex == 0 {
err := fmt.Errorf("partialCommit with RefreshIndex of 0")
mErr.Errors = append(mErr.Errors, err)
}

View File

@ -0,0 +1,27 @@
// +build !ent
package nomad
import (
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
// refreshIndex returns the index the scheduler should refresh to as the maximum
// of both the allocation and node tables.
func refreshIndex(snap *state.StateSnapshot) (uint64, error) {
allocIndex, err := snap.Index("allocs")
if err != nil {
return 0, err
}
nodeIndex, err := snap.Index("nodes")
if err != nil {
return 0, err
}
return maxUint64(nodeIndex, allocIndex), nil
}
// evaluatePlanQuota returns whether the plan would be over quota
func evaluatePlanQuota(snap *state.StateSnapshot, plan *structs.Plan) (bool, error) {
return false, nil
}

View File

@ -248,6 +248,7 @@ func TestPlanApply_EvalPlan_Simple(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
},
@ -300,6 +301,7 @@ func TestPlanApply_EvalPlan_Partial(t *testing.T) {
d.TaskGroups["web"].PlacedCanaries = []string{alloc.ID, alloc2.ID}
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
node2.ID: {alloc2},
@ -352,6 +354,7 @@ func TestPlanApply_EvalPlan_Partial_AllAtOnce(t *testing.T) {
alloc2 := mock.Alloc() // Ensure alloc2 does not fit
alloc2.Resources = node2.Resources
plan := &structs.Plan{
Job: alloc.Job,
AllAtOnce: true, // Require all to make progress
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
@ -398,6 +401,7 @@ func TestPlanApply_EvalNodePlan_Simple(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
},
@ -425,6 +429,7 @@ func TestPlanApply_EvalNodePlan_NodeNotReady(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
},
@ -452,6 +457,7 @@ func TestPlanApply_EvalNodePlan_NodeDrain(t *testing.T) {
alloc := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
},
@ -477,6 +483,7 @@ func TestPlanApply_EvalNodePlan_NodeNotExist(t *testing.T) {
nodeID := "12345678-abcd-efab-cdef-123456789abc"
alloc := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
nodeID: {alloc},
},
@ -512,6 +519,7 @@ func TestPlanApply_EvalNodePlan_NodeFull(t *testing.T) {
snap, _ := state.Snapshot()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc2},
},
@ -542,6 +550,7 @@ func TestPlanApply_EvalNodePlan_UpdateExisting(t *testing.T) {
snap, _ := state.Snapshot()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
},
@ -576,6 +585,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_Evict(t *testing.T) {
allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
alloc2 := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeUpdate: map[string][]*structs.Allocation{
node.ID: {allocEvict},
},
@ -611,6 +621,7 @@ func TestPlanApply_EvalNodePlan_NodeFull_AllocEvict(t *testing.T) {
alloc2 := mock.Alloc()
plan := &structs.Plan{
Job: alloc.Job,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc2},
},
@ -645,6 +656,7 @@ func TestPlanApply_EvalNodePlan_NodeDown_EvictOnly(t *testing.T) {
*allocEvict = *alloc
allocEvict.DesiredStatus = structs.AllocDesiredStatusEvict
plan := &structs.Plan{
Job: alloc.Job,
NodeUpdate: map[string][]*structs.Allocation{
node.ID: {allocEvict},
},

View File

@ -35,6 +35,7 @@ func TestPlanEndpoint_Submit(t *testing.T) {
plan := mock.Plan()
plan.EvalID = eval1.ID
plan.EvalToken = token
plan.Job = mock.Job()
req := &structs.PlanRequest{
Plan: plan,
WriteRequest: structs.WriteRequest{Region: "global"},

View File

@ -4,6 +4,7 @@ import (
"strings"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -76,7 +77,7 @@ func (s *Search) getMatches(iter memdb.ResultIterator, prefix string) ([]string,
// getResourceIter takes a context and returns a memdb iterator specific to
// that context
func getResourceIter(context structs.Context, aclObj *acl.ACL, namespace, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) {
switch context {
case structs.Jobs:
return state.JobsByIDPrefix(ws, namespace, prefix)
@ -89,7 +90,7 @@ func getResourceIter(context structs.Context, namespace, prefix string, ws memdb
case structs.Deployments:
return state.DeploymentsByIDPrefix(ws, namespace, prefix)
default:
return getEnterpriseResourceIter(context, aclObj, namespace, prefix, ws, state)
}
}
@ -139,7 +140,7 @@ func (s *Search) PrefixSearch(args *structs.SearchRequest, reply *structs.Search
contexts := searchContexts(aclObj, namespace, args.Context)
for _, ctx := range contexts {
iter, err := getResourceIter(ctx, aclObj, namespace, roundUUIDDownIfOdd(args.Prefix, args.Context), ws, state)
if err != nil {
e := err.Error()
switch {
@ -168,7 +169,7 @@ func (s *Search) PrefixSearch(args *structs.SearchRequest, reply *structs.Search
// will be used as the index of the response. Otherwise, the
// maximum index from all resources will be used.
for _, ctx := range contexts {
index, err := state.Index(contextToIndex(ctx))
if err != nil {
return err
}

View File

@ -17,6 +17,11 @@ var (
allContexts = ossContexts
)
// contextToIndex returns the index name to lookup in the state store.
func contextToIndex(ctx structs.Context) string {
return string(ctx)
}
// getEnterpriseMatch is a no-op in oss since there are no enterprise objects.
func getEnterpriseMatch(match interface{}) (id string, ok bool) {
return "", false
@ -24,7 +29,7 @@ func getEnterpriseMatch(match interface{}) (id string, ok bool) {
// getEnterpriseResourceIter is used to retrieve an iterator over an enterprise
// only table.
func getEnterpriseResourceIter(context structs.Context, _ *acl.ACL, namespace, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) {
// If we have made it here then it is an error since we have exhausted all
// open source contexts.
return nil, fmt.Errorf("context must be one of %v or 'all' for all contexts; got %q", allContexts, context)

View File

@ -806,8 +806,15 @@ func (s *Server) setupRaft() error {
}()
// Create the FSM
fsmConfig := &FSMConfig{
EvalBroker: s.evalBroker,
Periodic: s.periodicDispatcher,
Blocked: s.blockedEvals,
LogOutput: s.config.LogOutput,
Region: s.Region(),
}
var err error
s.fsm, err = NewFSM(fsmConfig)
if err != nil {
return err
}
@ -897,7 +904,7 @@ func (s *Server) setupRaft() error {
if err != nil {
return fmt.Errorf("recovery failed to parse peers.json: %v", err)
}
tmpFsm, err := NewFSM(fsmConfig)
if err != nil {
return fmt.Errorf("recovery failed to make temp FSM: %v", err)
}

View File

@ -20,6 +20,15 @@ type IndexEntry struct {
Value uint64
}
// StateStoreConfig is used to configure a new state store
type StateStoreConfig struct {
// LogOutput is used to configure the output of the state store's logs
LogOutput io.Writer
// Region is the region of the server embedding the state store.
Region string
}
// The StateStore is responsible for maintaining all the Nomad
// state. It is manipulated by the FSM which maintains consistency
// through the use of Raft. The goals of the StateStore are to provide
@ -31,13 +40,16 @@ type StateStore struct {
logger *log.Logger
db *memdb.MemDB
// config is the passed in configuration
config *StateStoreConfig
// abandonCh is used to signal watchers that this state store has been
// abandoned (usually during a restore). This is only ever closed.
abandonCh chan struct{}
}
// NewStateStore is used to create a new state store
func NewStateStore(config *StateStoreConfig) (*StateStore, error) {
// Create the MemDB
db, err := memdb.NewMemDB(stateStoreSchema())
if err != nil {
@ -46,13 +58,19 @@ func NewStateStore(logOutput io.Writer) (*StateStore, error) {
// Create the state store
s := &StateStore{
logger: log.New(config.LogOutput, "", log.LstdFlags),
db: db,
config: config,
abandonCh: make(chan struct{}),
}
return s, nil
}
// Config returns the state store configuration.
func (s *StateStore) Config() *StateStoreConfig {
return s.config
}
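A minimal sketch of constructing a store against the new configuration API from outside the package (this mirrors the TestStateStore helper added later in this diff; imports assumed):

sconfig := &state.StateStoreConfig{
	LogOutput: os.Stderr,
	Region:    "global",
}
store, err := state.NewStateStore(sconfig)
if err != nil {
	return err
}
// store.Config().Region == "global"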
// Snapshot is used to create a point in time snapshot. Because
// we use MemDB, we just need to snapshot the state of the underlying
// database.
@ -60,6 +78,7 @@ func (s *StateStore) Snapshot() (*StateSnapshot, error) {
snap := &StateSnapshot{
StateStore: StateStore{
logger: s.logger,
config: s.config,
db: s.db.Snapshot(),
},
}
@ -1494,14 +1513,14 @@ func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) e
}
for _, alloc := range allocs {
raw, err := txn.First("allocs", "id", alloc)
if err != nil {
return fmt.Errorf("alloc lookup failed: %v", err)
}
if raw == nil {
continue
}
if err := txn.Delete("allocs", raw); err != nil {
return fmt.Errorf("alloc delete failed: %v", err)
}
}
@ -1707,6 +1726,10 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, index uint64, a
return fmt.Errorf("error updating job summary: %v", err)
}
if err := s.updateEntWithAlloc(index, copyAlloc, exist, txn); err != nil {
return err
}
// Update the allocation
if err := txn.Insert("allocs", copyAlloc); err != nil {
return fmt.Errorf("alloc insert failed: %v", err)
@ -1799,6 +1822,10 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation
alloc.Namespace = structs.DefaultNamespace
}
// OPTIMIZATION:
// These should be given a map of new to old allocations and the updates
// should be done in one pass over all changes. The current implementation
// causes O(n) lookups/copies/insertions rather than O(1).
if err := s.updateDeploymentWithAlloc(index, alloc, exist, txn); err != nil {
return fmt.Errorf("error updating deployment: %v", err)
}
@ -1807,6 +1834,10 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation
return fmt.Errorf("error updating job summary: %v", err)
}
if err := s.updateEntWithAlloc(index, alloc, exist, txn); err != nil {
return err
}
// Create the EphemeralDisk if it's nil by adding up DiskMB from task resources.
// COMPAT 0.4.1 -> 0.5
if alloc.Job != nil {
@ -2047,7 +2078,12 @@ func (s *StateStore) Allocs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
// namespace
func (s *StateStore) AllocsByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
return s.allocsByNamespaceImpl(ws, txn, namespace)
}
// allocsByNamespaceImpl returns an iterator over all the allocations in the
// namespace
func (s *StateStore) allocsByNamespaceImpl(ws memdb.WatchSet, txn *memdb.Txn, namespace string) (memdb.ResultIterator, error) {
// Walk the entire table
iter, err := txn.Get("allocs", "namespace", namespace)
if err != nil {

View File

@ -11,3 +11,9 @@ import (
func (s *StateStore) namespaceExists(txn *memdb.Txn, namespace string) (bool, error) {
return namespace == structs.DefaultNamespace, nil
}
// updateEntWithAlloc is used to update Nomad Enterprise objects when an allocation is
// added/modified/deleted
func (s *StateStore) updateEntWithAlloc(index uint64, new, existing *structs.Allocation, txn *memdb.Txn) error {
return nil
}

View File

@ -3,7 +3,6 @@ package state
import (
"context"
"fmt"
"os"
"reflect"
"sort"
"strings"
@ -19,14 +18,7 @@ import (
)
func testStateStore(t *testing.T) *StateStore {
return TestStateStore(t)
}
func TestStateStore_Blocking_Error(t *testing.T) {
@ -2325,13 +2317,24 @@ func TestStateStore_Indexes(t *testing.T) {
out = append(out, raw.(*IndexEntry))
}
expect := &IndexEntry{"nodes", 1000}
if l := len(out); l != 1 && l != 2 {
t.Fatalf("unexpected number of index entries: %v", out)
}
for _, index := range out {
if index.Key != expect.Key {
continue
}
if index.Value != expect.Value {
t.Fatalf("bad index; got %d; want %d", index.Value, expect.Value)
}
// We matched
return
}
t.Fatal("did not find expected index entry")
}
func TestStateStore_LatestIndex(t *testing.T) {

nomad/state/testing.go Normal file
View File

@ -0,0 +1,23 @@
package state
import (
"os"
"github.com/mitchellh/go-testing-interface"
)
// TestStateStore returns a fully initialized state store for use in tests.
func TestStateStore(t testing.T) *StateStore {
config := &StateStoreConfig{
LogOutput: os.Stderr,
Region: "global",
}
state, err := NewStateStore(config)
if err != nil {
t.Fatalf("err: %v", err)
}
if state == nil {
t.Fatalf("missing state")
}
TestInitState(t, state)
return state
}

View File

@ -0,0 +1,9 @@
// +build !pro,!ent
package state
import (
"github.com/mitchellh/go-testing-interface"
)
// TestInitState is a no-op in open source builds; enterprise builds use it to
// seed enterprise-only state.
func TestInitState(t testing.T, state *StateStore) {}

View File

@ -128,6 +128,7 @@ const (
Jobs Context = "jobs"
Nodes Context = "nodes"
Namespaces Context = "namespaces"
Quotas Context = "quotas"
All Context = "all"
)
@ -2768,6 +2769,17 @@ func (tg *TaskGroup) GoString() string {
return fmt.Sprintf("*%#v", *tg)
}
// CombinedResources returns the combined resources for the task group
func (tg *TaskGroup) CombinedResources() *Resources {
r := &Resources{
DiskMB: tg.EphemeralDisk.SizeMB,
}
for _, task := range tg.Tasks {
r.Add(task.Resources)
}
return r
}
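To illustrate CombinedResources, assuming Resources.Add sums the scalar fields (CPU, MemoryMB, and so on), a two-task group would combine like this (values illustrative):

tg := &structs.TaskGroup{
	EphemeralDisk: &structs.EphemeralDisk{SizeMB: 300},
	Tasks: []*structs.Task{
		{Resources: &structs.Resources{CPU: 500, MemoryMB: 256}},
		{Resources: &structs.Resources{CPU: 250, MemoryMB: 128}},
	},
}
combined := tg.CombinedResources()
// combined.DiskMB == 300, combined.CPU == 750, combined.MemoryMB == 384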
// CheckRestart describes if and when a task should be restarted based on
// failing health checks.
type CheckRestart struct {
@ -4786,6 +4798,9 @@ type AllocMetric struct {
// DimensionExhausted provides the count by dimension or reason
DimensionExhausted map[string]int
// QuotaExhausted provides the exhausted dimensions
QuotaExhausted []string
// Scores is the scores of the final few nodes remaining
// for placement. The top score is typically selected.
Scores map[string]float64
@ -4812,6 +4827,7 @@ func (a *AllocMetric) Copy() *AllocMetric {
na.ConstraintFiltered = helper.CopyMapStringInt(na.ConstraintFiltered)
na.ClassExhausted = helper.CopyMapStringInt(na.ClassExhausted)
na.DimensionExhausted = helper.CopyMapStringInt(na.DimensionExhausted)
na.QuotaExhausted = helper.CopySliceString(na.QuotaExhausted)
na.Scores = helper.CopyMapStringFloat64(na.Scores)
return na
}
@ -4852,6 +4868,14 @@ func (a *AllocMetric) ExhaustedNode(node *Node, dimension string) {
}
}
func (a *AllocMetric) ExhaustQuota(dimensions []string) {
if a.QuotaExhausted == nil {
a.QuotaExhausted = make([]string, 0, len(dimensions))
}
a.QuotaExhausted = append(a.QuotaExhausted, dimensions...)
}
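ExhaustQuota appends rather than replaces, so repeated calls accumulate dimensions; a quick sketch (dimension strings illustrative):

var m structs.AllocMetric
m.ExhaustQuota([]string{"memory exhausted"})
m.ExhaustQuota([]string{"cpu exhausted"})
// m.QuotaExhausted == []string{"memory exhausted", "cpu exhausted"}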
func (a *AllocMetric) ScoreNode(node *Node, name string, score float64) {
if a.Scores == nil {
a.Scores = make(map[string]float64)
@ -5031,6 +5055,10 @@ type Evaluation struct {
// marked as eligible or ineligible.
ClassEligibility map[string]bool
// QuotaLimitReached marks whether a quota limit was reached for the
// evaluation.
QuotaLimitReached string
// EscapedComputedClass marks whether the job has constraints that are not
// captured by computed node classes.
EscapedComputedClass bool
@ -5165,8 +5193,11 @@ func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
// CreateBlockedEval creates a blocked evaluation to follow up this eval to place any
// failed allocations. It takes the classes marked explicitly eligible or
// ineligible, whether the job has escaped computed node classes and whether the
// quota limit was reached.
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool,
escaped bool, quotaReached string) *Evaluation {
return &Evaluation{
ID: uuid.Generate(),
Namespace: e.Namespace,
@ -5179,6 +5210,7 @@ func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped
PreviousEval: e.ID,
ClassEligibility: classEligibility,
EscapedComputedClass: escaped,
QuotaLimitReached: quotaReached,
}
}
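A hedged sketch of the new call site shape (values illustrative; compare the scheduler change later in this diff, which passes e.QuotaLimitReached()):

blocked := eval.CreateBlockedEval(
	map[string]bool{"v1:123": true}, // classes explicitly marked eligible/ineligible
	false,                           // job has not escaped computed node classes
	"default-quota",                 // quota whose limit was reached, "" if none
)
// blocked.PreviousEval == eval.ID and blocked.QuotaLimitReached == "default-quota"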

View File

@ -140,9 +140,20 @@ func shuffleStrings(list []string) {
}
// maxUint64 returns the maximum value among its inputs
func maxUint64(inputs ...uint64) uint64 {
l := len(inputs)
if l == 0 {
return 0
} else if l == 1 {
return inputs[0]
}
max := inputs[0]
for i := 1; i < l; i++ {
cur := inputs[i]
if cur > max {
max = cur
}
}
return max
}
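The variadic rewrite preserves the old two-argument behavior while accepting any argument count; for example:

maxUint64()        // 0 (no inputs)
maxUint64(7)       // 7
maxUint64(3, 9, 4) // 9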

View File

@ -337,8 +337,10 @@ func TestWorker_SubmitPlan(t *testing.T) {
node := mock.Node()
testRegisterNode(t, s1, node)
job := mock.Job()
eval1 := mock.Eval()
eval1.JobID = job.ID
s1.fsm.State().UpsertJob(1000, job)
// Create the register request
s1.evalBroker.Enqueue(eval1)
@ -353,8 +355,8 @@ func TestWorker_SubmitPlan(t *testing.T) {
// Create an allocation plan
alloc := mock.Alloc()
s1.fsm.State().UpsertJobSummary(1200, mock.JobSummary(alloc.JobID))
plan := &structs.Plan{
Job: job,
EvalID: eval1.ID,
NodeAllocation: map[string][]*structs.Allocation{
node.ID: {alloc},
@ -399,8 +401,13 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) {
node := mock.Node()
testRegisterNode(t, s1, node)
// Create the job
job := mock.Job()
s1.fsm.State().UpsertJob(1000, job)
// Create the register request
eval1 := mock.Eval()
eval1.JobID = job.ID
s1.evalBroker.Enqueue(eval1)
evalOut, token, err := s1.evalBroker.Dequeue([]string{eval1.Type}, time.Second)
@ -415,6 +422,7 @@ func TestWorker_SubmitPlan_MissingNodeRefresh(t *testing.T) {
node2 := mock.Node()
alloc := mock.Alloc()
plan := &structs.Plan{
Job: job,
EvalID: eval1.ID,
NodeAllocation: map[string][]*structs.Allocation{
node2.ID: {alloc},

View File

@ -185,6 +185,10 @@ type EvalEligibility struct {
// tgEscapedConstraints is a map of task groups to whether constraints have
// escaped.
tgEscapedConstraints map[string]bool
// quotaReached marks that the quota limit has been reached for the given
// quota
quotaReached string
}
// NewEvalEligibility returns an eligibility tracker for the context of an evaluation.
@ -328,3 +332,14 @@ func (e *EvalEligibility) SetTaskGroupEligibility(eligible bool, tg, class strin
e.taskGroups[tg] = map[string]ComputedClassFeasibility{class: eligibility}
}
}
// SetQuotaLimitReached marks that the quota limit has been reached for the
// given quota
func (e *EvalEligibility) SetQuotaLimitReached(quota string) {
e.quotaReached = quota
}
// QuotaLimitReached returns the quota name if the quota limit has been reached.
func (e *EvalEligibility) QuotaLimitReached() string {
return e.quotaReached
}
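A short sketch of the round trip through the eligibility tracker (quota name illustrative):

e := NewEvalEligibility()
e.SetQuotaLimitReached("api-prod")
if quota := e.QuotaLimitReached(); quota != "" {
	// a blocked eval created for this evaluation will carry the quota name
}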

View File

@ -13,10 +13,7 @@ import (
)
func testContext(t testing.TB) (*state.StateStore, *EvalContext) {
state := state.TestStateStore(t)
plan := &structs.Plan{
NodeUpdate: make(map[string][]*structs.Allocation),
NodeAllocation: make(map[string][]*structs.Allocation),

View File

@ -23,6 +23,13 @@ type FeasibleIterator interface {
Reset()
}
// ContextualIterator is an iterator that can have the job and task group set
// on it.
type ContextualIterator interface {
SetJob(*structs.Job)
SetTaskGroup(*structs.TaskGroup)
}
// FeasibilityChecker is used to check if a single node meets feasibility
// constraints.
type FeasibilityChecker interface {

View File

@ -154,6 +154,7 @@ func (s *GenericScheduler) Process(eval *structs.Evaluation) error {
newEval := s.eval.Copy()
newEval.EscapedComputedClass = e.HasEscaped()
newEval.ClassEligibility = e.GetClasses()
newEval.QuotaLimitReached = e.QuotaLimitReached()
return s.planner.ReblockEval(newEval)
}
@ -175,7 +176,7 @@ func (s *GenericScheduler) createBlockedEval(planFailure bool) error {
classEligibility = e.GetClasses()
}
s.blocked = s.eval.CreateBlockedEval(classEligibility, escaped, e.QuotaLimitReached())
if planFailure {
s.blocked.TriggeredBy = structs.EvalTriggerMaxPlans
s.blocked.StatusDescription = blockedEvalMaxPlanDesc

View File

@ -5,6 +5,7 @@ import (
"log"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
@ -61,6 +62,9 @@ type Scheduler interface {
// and to enforce complex constraints that require more information than
// is available to a local state scheduler.
type State interface {
// Config returns the configuration of the state store
Config() *state.StateStoreConfig
// Nodes returns an iterator over all the nodes.
// The type of each result is *structs.Node
Nodes(ws memdb.WatchSet) (memdb.ResultIterator, error)

View File

@ -0,0 +1,8 @@
// +build !pro,!ent
package scheduler
// StateEnterprise describes the state store methods available in the
// enterprise version.
type StateEnterprise interface {
}

View File

@ -40,6 +40,7 @@ type GenericStack struct {
source *StaticIterator
wrappedChecks *FeasibilityWrapper
quota FeasibleIterator
jobConstraint *ConstraintChecker
taskGroupDrivers *DriverChecker
taskGroupConstraint *ConstraintChecker
@ -65,6 +66,10 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack {
// balancing across eligible nodes.
s.source = NewRandomIterator(ctx, nil)
// Create the quota iterator to determine if placements would cause the quota
// attached to the namespace of the job to be exceeded.
s.quota = NewQuotaIterator(ctx, s.source)
// Attach the job constraints. The job is filled in later.
s.jobConstraint = NewConstraintChecker(ctx, nil)
@ -80,7 +85,7 @@ func NewGenericStack(batch bool, ctx Context) *GenericStack {
// checks that only needs to examine the single node to determine feasibility.
jobs := []FeasibilityChecker{s.jobConstraint}
tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint}
s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs)
// Filter on distinct host constraints.
s.distinctHostsConstraint = NewDistinctHostsIterator(ctx, s.wrappedChecks)
@ -143,6 +148,10 @@ func (s *GenericStack) SetJob(job *structs.Job) {
s.binPack.SetPriority(job.Priority)
s.jobAntiAff.SetJob(job.ID)
s.ctx.Eligibility().SetJob(job)
if contextual, ok := s.quota.(ContextualIterator); ok {
contextual.SetJob(job)
}
}
func (s *GenericStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Resources) {
@ -162,6 +171,10 @@ func (s *GenericStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Reso
s.wrappedChecks.SetTaskGroup(tg.Name)
s.binPack.SetTaskGroup(tg)
if contextual, ok := s.quota.(ContextualIterator); ok {
contextual.SetTaskGroup(tg)
}
// Find the node with the max score
option := s.maxScore.Next()
@ -196,6 +209,7 @@ type SystemStack struct {
ctx Context
source *StaticIterator
wrappedChecks *FeasibilityWrapper
quota FeasibleIterator
jobConstraint *ConstraintChecker
taskGroupDrivers *DriverChecker
taskGroupConstraint *ConstraintChecker
@ -212,6 +226,10 @@ func NewSystemStack(ctx Context) *SystemStack {
// have to evaluate on all nodes.
s.source = NewStaticIterator(ctx, nil)
// Create the quota iterator to determine if placements would cause the quota
// attached to the namespace of the job to be exceeded.
s.quota = NewQuotaIterator(ctx, s.source)
// Attach the job constraints. The job is filled in later.
s.jobConstraint = NewConstraintChecker(ctx, nil)
@ -227,7 +245,7 @@ func NewSystemStack(ctx Context) *SystemStack {
// checks that only needs to examine the single node to determine feasibility.
jobs := []FeasibilityChecker{s.jobConstraint}
tgs := []FeasibilityChecker{s.taskGroupDrivers, s.taskGroupConstraint}
s.wrappedChecks = NewFeasibilityWrapper(ctx, s.quota, jobs, tgs)
// Filter on distinct property constraints.
s.distinctPropertyConstraint = NewDistinctPropertyIterator(ctx, s.wrappedChecks)
@ -252,6 +270,10 @@ func (s *SystemStack) SetJob(job *structs.Job) {
s.distinctPropertyConstraint.SetJob(job)
s.binPack.SetPriority(job.Priority)
s.ctx.Eligibility().SetJob(job)
if contextual, ok := s.quota.(ContextualIterator); ok {
contextual.SetJob(job)
}
}
func (s *SystemStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Resources) {
@ -270,6 +292,10 @@ func (s *SystemStack) Select(tg *structs.TaskGroup) (*RankedNode, *structs.Resou
s.distinctPropertyConstraint.SetTaskGroup(tg)
s.binPack.SetTaskGroup(tg)
if contextual, ok := s.quota.(ContextualIterator); ok {
contextual.SetTaskGroup(tg)
}
// Get the next option that satisfies the constraints.
option := s.binPack.Next()

View File

@ -0,0 +1,7 @@
// +build !ent
package scheduler
// NewQuotaIterator returns the source iterator unchanged in open source
// builds since resource quotas are an enterprise feature.
func NewQuotaIterator(ctx Context, source FeasibleIterator) FeasibleIterator {
return source
}
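Because the open source NewQuotaIterator returns its source unchanged, the ContextualIterator type assertions in the stacks above quietly become no-ops; a sketch of the effect, assuming the source iterator implements neither SetJob nor SetTaskGroup:

quota := NewQuotaIterator(ctx, source)
if contextual, ok := quota.(ContextualIterator); ok {
	contextual.SetJob(job) // only reached in builds whose quota iterator is contextual
}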

Some files were not shown because too many files have changed in this diff.