Sync namespace changes

Alex Dadgar 2017-09-07 16:56:15 -07:00
parent 187c1568aa
commit 84d06f6abe
90 changed files with 3920 additions and 888 deletions

View File

@ -72,6 +72,7 @@ func (a *Allocations) GC(alloc *Allocation, q *QueryOptions) error {
// Allocation is used for serialization of allocations.
type Allocation struct {
ID string
Namespace string
EvalID string
Name string
NodeID string

View File

@ -24,6 +24,9 @@ type QueryOptions struct {
// by the Config
Region string
// Namespace is the target namespace for the query.
Namespace string
// AllowStale allows any Nomad server (non-leader) to service
// a read. This allows for lower latency and higher throughput
AllowStale bool
@ -52,6 +55,9 @@ type WriteOptions struct {
// by the Config
Region string
// Namespace is the target namespace for the write.
Namespace string
// SecretID is the secret ID of an ACL token
SecretID string
}
@ -100,6 +106,9 @@ type Config struct {
// Region to use. If not provided, the default agent region is used.
Region string
// Namespace to use. If not provided the default namespace is used.
Namespace string
// httpClient is the client to use. Default will be used if not provided.
httpClient *http.Client
@ -129,6 +138,7 @@ func (c *Config) ClientConfig(region, address string, tlsEnabled bool) *Config {
config := &Config{
Address: fmt.Sprintf("%s://%s", scheme, address),
Region: region,
Namespace: c.Namespace,
httpClient: defaultConfig.httpClient,
SecretID: c.SecretID,
HttpAuth: c.HttpAuth,
@ -193,6 +203,12 @@ func DefaultConfig() *Config {
if addr := os.Getenv("NOMAD_ADDR"); addr != "" {
config.Address = addr
}
if v := os.Getenv("NOMAD_REGION"); v != "" {
config.Region = v
}
if v := os.Getenv("NOMAD_NAMESPACE"); v != "" {
config.Namespace = v
}
if auth := os.Getenv("NOMAD_HTTP_AUTH"); auth != "" {
var username, password string
if strings.Contains(auth, ":") {
@ -314,6 +330,11 @@ func (c *Client) SetRegion(region string) {
c.config.Region = region
}
// SetNamespace sets the namespace to forward API requests to.
func (c *Client) SetNamespace(namespace string) {
c.config.Namespace = namespace
}
// GetNodeClient returns a new Client that will dial the specified node. If the
// QueryOptions is set, its region will be used.
func (c *Client) GetNodeClient(nodeID string, q *QueryOptions) (*Client, error) {
@ -382,6 +403,9 @@ func (r *request) setQueryOptions(q *QueryOptions) {
if q.Region != "" {
r.params.Set("region", q.Region)
}
if q.Namespace != "" {
r.params.Set("namespace", q.Namespace)
}
if q.SecretID != "" {
r.token = q.SecretID
}
@ -416,6 +440,9 @@ func (r *request) setWriteOptions(q *WriteOptions) {
if q.Region != "" {
r.params.Set("region", q.Region)
}
if q.Namespace != "" {
r.params.Set("namespace", q.Namespace)
}
if q.SecretID != "" {
r.token = q.SecretID
}
@ -482,6 +509,9 @@ func (c *Client) newRequest(method, path string) (*request, error) {
if c.config.Region != "" {
r.params.Set("region", c.config.Region)
}
if c.config.Namespace != "" {
r.params.Set("namespace", c.config.Namespace)
}
if c.config.WaitTime != 0 {
r.params.Set("wait", durToMsec(r.config.WaitTime))
}
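Taken together, the api changes above let a namespace come from the NOMAD_NAMESPACE environment variable, the client Config, or a per-request QueryOptions/WriteOptions override, with the chosen value sent as the ?namespace query parameter. A minimal usage sketch (illustrative only, not part of this commit; assumes the github.com/hashicorp/nomad/api package and a reachable agent):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// DefaultConfig picks up NOMAD_ADDR, NOMAD_REGION and, with this change,
	// NOMAD_NAMESPACE from the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Switch every subsequent request made through this client to "dev".
	client.SetNamespace("dev")

	// A per-request namespace in QueryOptions overrides the client default
	// and is emitted as the ?namespace= query parameter.
	jobs, _, err := client.Jobs().List(&api.QueryOptions{Namespace: "qa"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("jobs in qa:", len(jobs))
}
```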

View File

@ -115,11 +115,19 @@ func TestDefaultConfig_env(t *testing.T) {
t.Parallel()
url := "http://1.2.3.4:5678"
auth := []string{"nomaduser", "12345"}
region := "test"
namespace := "dev"
token := "foobar"
os.Setenv("NOMAD_ADDR", url)
defer os.Setenv("NOMAD_ADDR", "")
os.Setenv("NOMAD_REGION", region)
defer os.Setenv("NOMAD_REGION", "")
os.Setenv("NOMAD_NAMESPACE", namespace)
defer os.Setenv("NOMAD_NAMESPACE", "")
os.Setenv("NOMAD_HTTP_AUTH", strings.Join(auth, ":"))
defer os.Setenv("NOMAD_HTTP_AUTH", "")
@ -132,6 +140,14 @@ func TestDefaultConfig_env(t *testing.T) {
t.Errorf("expected %q to be %q", config.Address, url)
}
if config.Region != region {
t.Errorf("expected %q to be %q", config.Region, region)
}
if config.Namespace != namespace {
t.Errorf("expected %q to be %q", config.Namespace, namespace)
}
if config.HttpAuth.Username != auth[0] {
t.Errorf("expected %q to be %q", config.HttpAuth.Username, auth[0])
}
@ -153,6 +169,7 @@ func TestSetQueryOptions(t *testing.T) {
r, _ := c.newRequest("GET", "/v1/jobs")
q := &QueryOptions{
Region: "foo",
Namespace: "bar",
AllowStale: true,
WaitIndex: 1000,
WaitTime: 100 * time.Second,
@ -163,6 +180,9 @@ func TestSetQueryOptions(t *testing.T) {
if r.params.Get("region") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("namespace") != "bar" {
t.Fatalf("bad: %v", r.params)
}
if _, ok := r.params["stale"]; !ok {
t.Fatalf("bad: %v", r.params)
}
@ -185,6 +205,7 @@ func TestSetWriteOptions(t *testing.T) {
r, _ := c.newRequest("GET", "/v1/jobs")
q := &WriteOptions{
Region: "foo",
Namespace: "bar",
SecretID: "foobar",
}
r.setWriteOptions(q)
@ -192,6 +213,9 @@ func TestSetWriteOptions(t *testing.T) {
if r.params.Get("region") != "foo" {
t.Fatalf("bad: %v", r.params)
}
if r.params.Get("namespace") != "bar" {
t.Fatalf("bad: %v", r.params)
}
if r.token != "foobar" {
t.Fatalf("bad: %v", r.token)
}
@ -205,6 +229,7 @@ func TestRequestToHTTP(t *testing.T) {
r, _ := c.newRequest("DELETE", "/v1/jobs/foo")
q := &QueryOptions{
Region: "foo",
Namespace: "bar",
SecretID: "foobar",
}
r.setQueryOptions(q)
@ -216,7 +241,7 @@ func TestRequestToHTTP(t *testing.T) {
if req.Method != "DELETE" {
t.Fatalf("bad: %v", req)
}
if req.URL.RequestURI() != "/v1/jobs/foo?region=foo" {
if req.URL.RequestURI() != "/v1/jobs/foo?namespace=bar&region=foo" {
t.Fatalf("bad: %v", req)
}
if req.Header.Get("X-Nomad-Token") != "foobar" {
@ -272,7 +297,10 @@ func TestQueryString(t *testing.T) {
defer s.Stop()
r, _ := c.newRequest("PUT", "/v1/abc?foo=bar&baz=zip")
q := &WriteOptions{Region: "foo"}
q := &WriteOptions{
Region: "foo",
Namespace: "bar",
}
r.setWriteOptions(q)
req, err := r.toHTTP()
@ -280,7 +308,7 @@ func TestQueryString(t *testing.T) {
t.Fatalf("err: %s", err)
}
if uri := req.URL.RequestURI(); uri != "/v1/abc?baz=zip&foo=bar&region=foo" {
if uri := req.URL.RequestURI(); uri != "/v1/abc?baz=zip&foo=bar&namespace=bar&region=foo" {
t.Fatalf("bad uri: %q", uri)
}
}

View File

@ -9,5 +9,6 @@ const (
Evals Context = "evals"
Jobs Context = "jobs"
Nodes Context = "nodes"
Namespaces Context = "namespaces"
All Context = "all"
)
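The new Namespaces search context means prefix search can be scoped to namespaces, which the CLI autocompletion added later in this commit relies on. A hedged sketch of that call (illustrative; assumes the api and api/contexts packages and a namespace-aware agent):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/api/contexts"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Prefix search restricted to the new "namespaces" context, the same
	// call the NamespacePredictor in this commit makes.
	resp, _, err := client.Search().PrefixSearch("de", contexts.Namespaces, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Matches[contexts.Namespaces])
}
```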

View File

@ -14,7 +14,7 @@ func (c *Client) Deployments() *Deployments {
return &Deployments{client: c}
}
// List is used to dump all of the evaluations.
// List is used to dump all of the deployments.
func (d *Deployments) List(q *QueryOptions) ([]*Deployment, *QueryMeta, error) {
var resp []*Deployment
qm, err := d.client.query("/v1/deployments", &resp, q)
@ -29,7 +29,7 @@ func (d *Deployments) PrefixList(prefix string) ([]*Deployment, *QueryMeta, erro
return d.List(&QueryOptions{Prefix: prefix})
}
// Info is used to query a single evaluation by its ID.
// Info is used to query a single deployment by its ID.
func (d *Deployments) Info(deploymentID string, q *QueryOptions) (*Deployment, *QueryMeta, error) {
var resp Deployment
qm, err := d.client.query("/v1/deployment/"+deploymentID, &resp, q)
@ -125,6 +125,7 @@ func (d *Deployments) SetAllocHealth(deploymentID string, healthy, unhealthy []s
// Deployment is used to serialize a deployment.
type Deployment struct {
ID string
Namespace string
JobID string
JobVersion uint64
JobModifyIndex uint64

View File

@ -58,6 +58,7 @@ type Evaluation struct {
Priority int
Type string
TriggeredBy string
Namespace string
JobID string
JobModifyIndex uint64
NodeID string

View File

@ -21,6 +21,9 @@ const (
// PeriodicSpecCron is used for a cron spec.
PeriodicSpecCron = "cron"
// DefaultNamespace is the default namespace.
DefaultNamespace = "default"
)
const (
@ -500,6 +503,7 @@ type ParameterizedJobConfig struct {
type Job struct {
Stop *bool
Region *string
Namespace *string
ID *string
ParentID *string
Name *string
@ -545,6 +549,9 @@ func (j *Job) Canonicalize() {
if j.ParentID == nil {
j.ParentID = helper.StringToPtr("")
}
if j.Namespace == nil {
j.Namespace = helper.StringToPtr(DefaultNamespace)
}
if j.Priority == nil {
j.Priority = helper.IntToPtr(50)
}
@ -554,6 +561,9 @@ func (j *Job) Canonicalize() {
if j.Region == nil {
j.Region = helper.StringToPtr("global")
}
if j.Namespace == nil {
j.Namespace = helper.StringToPtr("default")
}
if j.Type == nil {
j.Type = helper.StringToPtr("service")
}
@ -599,6 +609,7 @@ func (j *Job) Canonicalize() {
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
JobID string
Namespace string
Summary map[string]TaskGroupSummary
Children *JobChildrenSummary
@ -730,6 +741,9 @@ type WriteRequest struct {
// The target region for this write
Region string
// Namespace is the target namespace for this write
Namespace string
// SecretID is the secret ID of an ACL token
SecretID string
}
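With the Canonicalize change above, a jobspec that omits a namespace lands in the default namespace. A small sketch of that behaviour (illustrative, not part of the commit; assumes the api and helper packages shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
	"github.com/hashicorp/nomad/helper"
)

func main() {
	// A job that never mentions a namespace...
	job := &api.Job{
		ID:   helper.StringToPtr("example"),
		Name: helper.StringToPtr("example"),
	}

	// ...is canonicalized into the default namespace.
	job.Canonicalize()
	fmt.Println(*job.Namespace == api.DefaultNamespace) // true
}
```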

View File

@ -104,6 +104,7 @@ func TestJobs_Canonicalize(t *testing.T) {
ID: helper.StringToPtr(""),
Name: helper.StringToPtr(""),
Region: helper.StringToPtr("global"),
Namespace: helper.StringToPtr(DefaultNamespace),
Type: helper.StringToPtr("service"),
ParentID: helper.StringToPtr(""),
Priority: helper.IntToPtr(50),
@ -147,6 +148,7 @@ func TestJobs_Canonicalize(t *testing.T) {
name: "partial",
input: &Job{
Name: helper.StringToPtr("foo"),
Namespace: helper.StringToPtr("bar"),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr("lol"),
TaskGroups: []*TaskGroup{
@ -161,6 +163,7 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr("bar"),
ID: helper.StringToPtr("bar"),
Name: helper.StringToPtr("foo"),
Region: helper.StringToPtr("global"),
@ -284,6 +287,7 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("example_template"),
Name: helper.StringToPtr("example_template"),
ParentID: helper.StringToPtr(""),
@ -420,6 +424,7 @@ func TestJobs_Canonicalize(t *testing.T) {
Periodic: &PeriodicConfig{},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("bar"),
ParentID: helper.StringToPtr(""),
Name: helper.StringToPtr("bar"),
@ -489,6 +494,7 @@ func TestJobs_Canonicalize(t *testing.T) {
},
},
expected: &Job{
Namespace: helper.StringToPtr(DefaultNamespace),
ID: helper.StringToPtr("bar"),
Name: helper.StringToPtr("foo"),
Region: helper.StringToPtr("global"),

api/namespace.go (new file, 90 lines)
View File

@ -0,0 +1,90 @@
package api
import (
"fmt"
"sort"
)
// Namespaces is used to query the namespace endpoints.
type Namespaces struct {
client *Client
}
// Namespaces returns a new handle on the namespaces.
func (c *Client) Namespaces() *Namespaces {
return &Namespaces{client: c}
}
// List is used to dump all of the namespaces.
func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
var resp []*Namespace
qm, err := n.client.query("/v1/namespaces", &resp, q)
if err != nil {
return nil, nil, err
}
sort.Sort(NamespaceIndexSort(resp))
return resp, qm, nil
}
// PrefixList is used to do a PrefixList search over namespaces
func (n *Namespaces) PrefixList(prefix string, q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
if q == nil {
q = &QueryOptions{Prefix: prefix}
} else {
q.Prefix = prefix
}
return n.List(q)
}
// Info is used to query a single namespace by its name.
func (n *Namespaces) Info(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) {
var resp Namespace
qm, err := n.client.query("/v1/namespace/"+name, &resp, q)
if err != nil {
return nil, nil, err
}
return &resp, qm, nil
}
// Register is used to register a namespace.
func (n *Namespaces) Register(namespace *Namespace, q *WriteOptions) (*WriteMeta, error) {
wm, err := n.client.write("/v1/namespace", namespace, nil, q)
if err != nil {
return nil, err
}
return wm, nil
}
// Delete is used to delete a namespace
func (n *Namespaces) Delete(namespace string, q *WriteOptions) (*WriteMeta, error) {
wm, err := n.client.delete(fmt.Sprintf("/v1/namespace/%s", namespace), nil, q)
if err != nil {
return nil, err
}
return wm, nil
}
// Namespace is used to serialize a namespace.
type Namespace struct {
Name string
Description string
CreateIndex uint64
ModifyIndex uint64
}
// NamespaceIndexSort is a wrapper to sort Namespaces by CreateIndex. We
// reverse the test so that we get the highest index first.
type NamespaceIndexSort []*Namespace
func (n NamespaceIndexSort) Len() int {
return len(n)
}
func (n NamespaceIndexSort) Less(i, j int) bool {
return n[i].CreateIndex > n[j].CreateIndex
}
func (n NamespaceIndexSort) Swap(i, j int) {
n[i], n[j] = n[j], n[i]
}
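A usage sketch for the new Namespaces API (illustrative, not part of the commit; assumes an agent that supports namespaces, since the accompanying tests are build-tagged pro/ent):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	namespaces := client.Namespaces()

	// Create (or update) a namespace.
	if _, err := namespaces.Register(&api.Namespace{
		Name:        "dev",
		Description: "development workloads",
	}, nil); err != nil {
		log.Fatal(err)
	}

	// List returns namespaces sorted by CreateIndex, newest first.
	all, _, err := namespaces.List(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range all {
		fmt.Printf("%s\t%s\n", ns.Name, ns.Description)
	}

	// Delete removes the namespace again.
	if _, err := namespaces.Delete("dev", nil); err != nil {
		log.Fatal(err)
	}
}
```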

api/namespace_test.go (new file, 145 lines)
View File

@ -0,0 +1,145 @@
// +build pro ent
package api
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNamespaces_Register(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
namespaces := c.Namespaces()
// Create a namespace and register it
ns := testNamespace()
wm, err := namespaces.Register(ns, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the jobs back out again
resp, qm, err := namespaces.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
assert.Equal(ns.Name, resp[0].Name)
assert.Equal("default", resp[1].Name)
}
func TestNamespaces_Register_Invalid(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
namespaces := c.Namespaces()
// Create an invalid namespace and register it
ns := testNamespace()
ns.Name = "*"
_, err := namespaces.Register(ns, nil)
assert.NotNil(err)
}
func TestNamespace_Info(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
namespaces := c.Namespaces()
// Trying to retrieve a namespace before it exists returns an error
_, _, err := namespaces.Info("foo", nil)
assert.NotNil(err)
assert.Contains(err.Error(), "not found")
// Register the namespace
ns := testNamespace()
wm, err := namespaces.Register(ns, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the namespace again and ensure it exists
result, qm, err := namespaces.Info(ns.Name, nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.NotNil(result)
assert.Equal(ns.Name, result.Name)
}
func TestNamespaces_Delete(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
namespaces := c.Namespaces()
// Create a namespace and register it
ns := testNamespace()
wm, err := namespaces.Register(ns, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the namespace back out again
resp, qm, err := namespaces.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
assert.Equal(ns.Name, resp[0].Name)
assert.Equal("default", resp[1].Name)
// Delete the namespace
wm, err = namespaces.Delete(ns.Name, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the namespaces back out again
resp, qm, err = namespaces.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal("default", resp[0].Name)
}
func TestNamespaces_List(t *testing.T) {
t.Parallel()
assert := assert.New(t)
c, s := makeClient(t, nil, nil)
defer s.Stop()
namespaces := c.Namespaces()
// Create two namespaces and register them
ns1 := testNamespace()
ns2 := testNamespace()
ns1.Name = "fooaaa"
ns2.Name = "foobbb"
wm, err := namespaces.Register(ns1, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
wm, err = namespaces.Register(ns2, nil)
assert.Nil(err)
assertWriteMeta(t, wm)
// Query the namespaces
resp, qm, err := namespaces.List(nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 3)
// Query the namespaces using a prefix
resp, qm, err = namespaces.PrefixList("foo", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 2)
// Query the namespaces using a prefix
resp, qm, err = namespaces.PrefixList("foob", nil)
assert.Nil(err)
assertQueryMeta(t, qm)
assert.Len(resp, 1)
assert.Equal(ns2.Name, resp[0].Name)
}

View File

@ -55,3 +55,10 @@ func testPeriodicJob() *Job {
})
return job
}
func testNamespace() *Namespace {
return &Namespace{
Name: "test-namespace",
Description: "Testing namespaces",
}
}

View File

@ -84,7 +84,7 @@ func (s *HTTPServer) aclPolicyUpdate(resp http.ResponseWriter, req *http.Request
args := structs.ACLPolicyUpsertRequest{
Policies: []*structs.ACLPolicy{&policy},
}
s.parseWrite(req, &args.WriteRequest)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.GenericResponse
if err := s.agent.RPC("ACL.UpsertPolicies", &args, &out); err != nil {
@ -100,7 +100,7 @@ func (s *HTTPServer) aclPolicyDelete(resp http.ResponseWriter, req *http.Request
args := structs.ACLPolicyDeleteRequest{
Names: []string{policyName},
}
s.parseWrite(req, &args.WriteRequest)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.GenericResponse
if err := s.agent.RPC("ACL.DeletePolicies", &args, &out); err != nil {
@ -140,7 +140,7 @@ func (s *HTTPServer) ACLTokenBootstrap(resp http.ResponseWriter, req *http.Reque
// Format the request
args := structs.ACLTokenBootstrapRequest{}
s.parseWrite(req, &args.WriteRequest)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.ACLTokenUpsertResponse
if err := s.agent.RPC("ACL.Bootstrap", &args, &out); err != nil {
@ -220,7 +220,7 @@ func (s *HTTPServer) aclTokenUpdate(resp http.ResponseWriter, req *http.Request,
args := structs.ACLTokenUpsertRequest{
Tokens: []*structs.ACLToken{&token},
}
s.parseWrite(req, &args.WriteRequest)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.ACLTokenUpsertResponse
if err := s.agent.RPC("ACL.UpsertTokens", &args, &out); err != nil {
@ -239,7 +239,7 @@ func (s *HTTPServer) aclTokenDelete(resp http.ResponseWriter, req *http.Request,
args := structs.ACLTokenDeleteRequest{
AccessorIDs: []string{tokenAccessor},
}
s.parseWrite(req, &args.WriteRequest)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.GenericResponse
if err := s.agent.RPC("ACL.DeleteTokens", &args, &out); err != nil {

View File

@ -60,7 +60,7 @@ func (s *HTTPServer) deploymentFail(resp http.ResponseWriter, req *http.Request,
args := structs.DeploymentFailRequest{
DeploymentID: deploymentID,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.DeploymentUpdateResponse
if err := s.agent.RPC("Deployment.Fail", &args, &out); err != nil {
@ -85,7 +85,7 @@ func (s *HTTPServer) deploymentPause(resp http.ResponseWriter, req *http.Request
if pauseRequest.DeploymentID != deploymentID {
return nil, CodedError(400, "Deployment ID does not match")
}
s.parseRegion(req, &pauseRequest.Region)
s.parseWriteRequest(req, &pauseRequest.WriteRequest)
var out structs.DeploymentUpdateResponse
if err := s.agent.RPC("Deployment.Pause", &pauseRequest, &out); err != nil {
@ -110,7 +110,7 @@ func (s *HTTPServer) deploymentPromote(resp http.ResponseWriter, req *http.Reque
if promoteRequest.DeploymentID != deploymentID {
return nil, CodedError(400, "Deployment ID does not match")
}
s.parseRegion(req, &promoteRequest.Region)
s.parseWriteRequest(req, &promoteRequest.WriteRequest)
var out structs.DeploymentUpdateResponse
if err := s.agent.RPC("Deployment.Promote", &promoteRequest, &out); err != nil {
@ -135,7 +135,7 @@ func (s *HTTPServer) deploymentSetAllocHealth(resp http.ResponseWriter, req *htt
if healthRequest.DeploymentID != deploymentID {
return nil, CodedError(400, "Deployment ID does not match")
}
s.parseRegion(req, &healthRequest.Region)
s.parseWriteRequest(req, &healthRequest.WriteRequest)
var out structs.DeploymentUpdateResponse
if err := s.agent.RPC("Deployment.SetAllocHealth", &healthRequest, &out); err != nil {

View File

@ -159,7 +159,10 @@ func TestHTTP_DeploymentPause(t *testing.T) {
args := structs.DeploymentPauseRequest{
DeploymentID: d.ID,
Pause: false,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -197,7 +200,10 @@ func TestHTTP_DeploymentPromote(t *testing.T) {
args := structs.DeploymentPromoteRequest{
DeploymentID: d.ID,
All: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -239,7 +245,10 @@ func TestHTTP_DeploymentAllocHealth(t *testing.T) {
args := structs.DeploymentAllocHealthRequest{
DeploymentID: d.ID,
HealthyAllocationIDs: []string{a.ID},
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)

View File

@ -191,6 +191,9 @@ func (s *HTTPServer) registerHandlers(enableDebug bool) {
s.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
s.mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
// Register enterprise endpoints.
s.registerEnterpriseHandlers()
}
// HTTPCodedError is used to provide the HTTP error code
@ -361,6 +364,15 @@ func (s *HTTPServer) parseRegion(req *http.Request, r *string) {
}
}
// parseNamespace is used to parse the ?namespace parameter
func parseNamespace(req *http.Request, n *string) {
if other := req.URL.Query().Get("namespace"); other != "" {
*n = other
} else if *n == "" {
*n = structs.DefaultNamespace
}
}
// parseToken is used to parse the X-Nomad-Token param
func (s *HTTPServer) parseToken(req *http.Request, token *string) {
if other := req.Header.Get("X-Nomad-Token"); other != "" {
@ -369,17 +381,20 @@ func (s *HTTPServer) parseToken(req *http.Request, token *string) {
}
}
// parseWrite is a convenience method for endpoints that call write methods
func (s *HTTPServer) parseWrite(req *http.Request, b *structs.WriteRequest) {
s.parseRegion(req, &b.Region)
s.parseToken(req, &b.SecretID)
}
// parse is a convenience method for endpoints that need to parse multiple flags
func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, r *string, b *structs.QueryOptions) bool {
s.parseRegion(req, r)
s.parseToken(req, &b.SecretID)
parseConsistency(req, b)
parsePrefix(req, b)
parseNamespace(req, &b.Namespace)
return parseWait(resp, req, b)
}
// parseWriteRequest is a convenience method for endpoints that need to parse a
// write request.
func (s *HTTPServer) parseWriteRequest(req *http.Request, w *structs.WriteRequest) {
parseNamespace(req, &w.Namespace)
s.parseToken(req, &w.SecretID)
s.parseRegion(req, &w.Region)
}
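parseNamespace gives an explicit ?namespace query parameter precedence, keeps any value already present on the request struct, and otherwise falls back to the default namespace. A standalone sketch of that precedence (illustrative; the local constant simply mirrors structs.DefaultNamespace):

```go
package main

import (
	"fmt"
	"net/http"
)

const defaultNamespace = "default" // mirrors structs.DefaultNamespace

// parseNamespace mirrors the agent helper added in this commit.
func parseNamespace(req *http.Request, n *string) {
	if other := req.URL.Query().Get("namespace"); other != "" {
		*n = other
	} else if *n == "" {
		*n = defaultNamespace
	}
}

func main() {
	// An explicit query parameter wins.
	req, _ := http.NewRequest("GET", "/v1/jobs?namespace=dev", nil)
	ns := ""
	parseNamespace(req, &ns)
	fmt.Println(ns) // dev

	// With no parameter and no prior value, the default is used.
	req, _ = http.NewRequest("GET", "/v1/jobs", nil)
	ns = ""
	parseNamespace(req, &ns)
	fmt.Println(ns) // default
}
```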

View File

@ -0,0 +1,6 @@
// +build !pro,!ent
package agent
// registerEnterpriseHandlers is a no-op for the oss release
func (s *HTTPServer) registerEnterpriseHandlers() {}

View File

@ -92,7 +92,7 @@ func (s *HTTPServer) jobForceEvaluate(resp http.ResponseWriter, req *http.Reques
args := structs.JobEvaluateRequest{
JobID: jobName,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Evaluate", &args, &out); err != nil {
@ -121,7 +121,6 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
if jobName != "" && *args.Job.ID != jobName {
return nil, CodedError(400, "Job ID does not match")
}
s.parseRegion(req, &args.Region)
sJob := ApiJobToStructJob(args.Job)
planReq := structs.JobPlanRequest{
@ -131,6 +130,7 @@ func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
Region: args.WriteRequest.Region,
},
}
s.parseWriteRequest(req, &planReq.WriteRequest)
var out structs.JobPlanResponse
if err := s.agent.RPC("Job.Plan", &planReq, &out); err != nil {
return nil, err
@ -160,7 +160,7 @@ func (s *HTTPServer) ValidateJobRequest(resp http.ResponseWriter, req *http.Requ
Region: validateRequest.Region,
},
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.JobValidateResponse
if err := s.agent.RPC("Job.Validate", &args, &out); err != nil {
@ -179,7 +179,7 @@ func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Re
args := structs.PeriodicForceRequest{
JobID: jobName,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.PeriodicForceResponse
if err := s.agent.RPC("Periodic.Force", &args, &out); err != nil {
@ -348,8 +348,6 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
if jobName != "" && *args.Job.ID != jobName {
return nil, CodedError(400, "Job ID does not match name")
}
s.parseRegion(req, &args.Region)
s.parseToken(req, &args.SecretID)
sJob := ApiJobToStructJob(args.Job)
@ -362,6 +360,7 @@ func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
SecretID: args.WriteRequest.SecretID,
},
}
s.parseWriteRequest(req, &regReq.WriteRequest)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Register", &regReq, &out); err != nil {
return nil, err
@ -387,7 +386,7 @@ func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
JobID: jobName,
Purge: purgeBool,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.JobDeregisterResponse
if err := s.agent.RPC("Job.Deregister", &args, &out); err != nil {
@ -449,7 +448,7 @@ func (s *HTTPServer) jobRevert(resp http.ResponseWriter, req *http.Request,
return nil, CodedError(400, "Job ID does not match")
}
s.parseRegion(req, &revertRequest.Region)
s.parseWriteRequest(req, &revertRequest.WriteRequest)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Revert", &revertRequest, &out); err != nil {
@ -478,7 +477,7 @@ func (s *HTTPServer) jobStable(resp http.ResponseWriter, req *http.Request,
return nil, CodedError(400, "Job ID does not match")
}
s.parseRegion(req, &stableRequest.Region)
s.parseWriteRequest(req, &stableRequest.WriteRequest)
var out structs.JobStabilityResponse
if err := s.agent.RPC("Job.Stable", &stableRequest, &out); err != nil {
@ -525,7 +524,7 @@ func (s *HTTPServer) jobDispatchRequest(resp http.ResponseWriter, req *http.Requ
args.JobID = name
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.JobDispatchResponse
if err := s.agent.RPC("Job.Dispatch", &args, &out); err != nil {
@ -541,6 +540,7 @@ func ApiJobToStructJob(job *api.Job) *structs.Job {
j := &structs.Job{
Stop: *job.Stop,
Region: *job.Region,
Namespace: *job.Namespace,
ID: *job.ID,
ParentID: *job.ParentID,
Name: *job.Name,

View File

@ -25,7 +25,10 @@ func TestHTTP_JobsList(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -80,7 +83,10 @@ func TestHTTP_PrefixJobsList(t *testing.T) {
job.TaskGroups[0].Count = 1
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -158,7 +164,10 @@ func TestHTTP_JobsRegister(t *testing.T) {
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
@ -244,7 +253,10 @@ func TestHTTP_JobsRegister_Defaulting(t *testing.T) {
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
@ -267,7 +279,10 @@ func TestHTTP_JobQuery(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -367,7 +382,10 @@ func TestHTTP_JobUpdate(t *testing.T) {
job := api.MockJob()
args := api.JobRegisterRequest{
Job: job,
WriteRequest: api.WriteRequest{Region: "global"},
WriteRequest: api.WriteRequest{
Region: "global",
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -398,7 +416,10 @@ func TestHTTP_JobUpdate(t *testing.T) {
// Check the job is registered
getReq := structs.JobSpecificRequest{
JobID: *job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq, &getResp); err != nil {
@ -418,7 +439,10 @@ func TestHTTP_JobDelete(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -452,7 +476,10 @@ func TestHTTP_JobDelete(t *testing.T) {
// Check the job is still queryable
getReq1 := structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp1 structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq1, &getResp1); err != nil {
@ -492,7 +519,10 @@ func TestHTTP_JobDelete(t *testing.T) {
// Check the job is gone
getReq2 := structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var getResp2 structs.SingleJobResponse
if err := s.Agent.RPC("Job.GetJob", &getReq2, &getResp2); err != nil {
@ -511,7 +541,10 @@ func TestHTTP_JobForceEvaluate(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -551,7 +584,10 @@ func TestHTTP_JobEvaluations(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -599,7 +635,10 @@ func TestHTTP_JobAllocations(t *testing.T) {
alloc1 := mock.Alloc()
args := structs.JobRegisterRequest{
Job: alloc1.Job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -653,7 +692,10 @@ func TestHTTP_JobDeployments(t *testing.T) {
j := mock.Job()
args := structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister")
@ -692,7 +734,10 @@ func TestHTTP_JobDeployment(t *testing.T) {
j := mock.Job()
args := structs.JobRegisterRequest{
Job: j,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
assert.Nil(s.Agent.RPC("Job.Register", &args, &resp), "JobRegister")
@ -730,7 +775,10 @@ func TestHTTP_JobVersions(t *testing.T) {
job := mock.Job()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -743,7 +791,10 @@ func TestHTTP_JobVersions(t *testing.T) {
args2 := structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp2 structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args2, &resp2); err != nil {
@ -802,7 +853,10 @@ func TestHTTP_PeriodicForce(t *testing.T) {
job := mock.PeriodicJob()
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -843,7 +897,10 @@ func TestHTTP_JobPlan(t *testing.T) {
args := api.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: api.WriteRequest{Region: "global"},
WriteRequest: api.WriteRequest{
Region: "global",
Namespace: api.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -882,7 +939,10 @@ func TestHTTP_JobDispatch(t *testing.T) {
args := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &args, &resp); err != nil {
@ -892,7 +952,10 @@ func TestHTTP_JobDispatch(t *testing.T) {
// Make the request
respW := httptest.NewRecorder()
args2 := structs.JobDispatchRequest{
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args2)
@ -928,7 +991,10 @@ func TestHTTP_JobRevert(t *testing.T) {
job := mock.Job()
regReq := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var regResp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
@ -944,7 +1010,10 @@ func TestHTTP_JobRevert(t *testing.T) {
args := structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -981,7 +1050,10 @@ func TestHTTP_JobStable(t *testing.T) {
job := mock.Job()
regReq := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var regResp structs.JobRegisterResponse
if err := s.Agent.RPC("Job.Register", &regReq, &regResp); err != nil {
@ -996,7 +1068,10 @@ func TestHTTP_JobStable(t *testing.T) {
JobID: job.ID,
JobVersion: 0,
Stable: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
buf := encodeReq(args)
@ -1030,6 +1105,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
apiJob := &api.Job{
Stop: helper.BoolToPtr(true),
Region: helper.StringToPtr("global"),
Namespace: helper.StringToPtr("foo"),
ID: helper.StringToPtr("foo"),
ParentID: helper.StringToPtr("lol"),
Name: helper.StringToPtr("name"),
@ -1224,6 +1300,7 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
expected := &structs.Job{
Stop: true,
Region: "global",
Namespace: "foo",
ID: "foo",
ParentID: "lol",
Name: "name",

View File

@ -55,7 +55,7 @@ func (s *HTTPServer) nodeForceEvaluate(resp http.ResponseWriter, req *http.Reque
args := structs.NodeEvaluateRequest{
NodeID: nodeID,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.NodeUpdateResponse
if err := s.agent.RPC("Node.Evaluate", &args, &out); err != nil {
@ -109,7 +109,7 @@ func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request
NodeID: nodeID,
Drain: enable,
}
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
var out structs.NodeDrainUpdateResponse
if err := s.agent.RPC("Node.UpdateDrain", &args, &out); err != nil {

View File

@ -50,7 +50,7 @@ func (s *HTTPServer) OperatorRaftPeer(resp http.ResponseWriter, req *http.Reques
}
var args structs.RaftPeerByAddressRequest
s.parseRegion(req, &args.Region)
s.parseWriteRequest(req, &args.WriteRequest)
params := req.URL.Query()
if _, ok := params["address"]; ok {

View File

@ -22,6 +22,10 @@ func (s *HTTPServer) newSearchRequest(resp http.ResponseWriter, req *http.Reques
return nil, CodedError(400, err.Error())
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SearchResponse
if err := s.agent.RPC("Search.PrefixSearch", &args, &out); err != nil {
return nil, err

View File

@ -15,11 +15,6 @@ import (
)
const (
// Names of environment variables used to supply various
// config options to the Nomad CLI.
EnvNomadAddress = "NOMAD_ADDR"
EnvNomadRegion = "NOMAD_REGION"
// Constants for CLI identifier length
shortId = 8
fullId = 36
@ -49,6 +44,9 @@ type Meta struct {
// The region to send API requests
region string
// namespace to send API requests
namespace string
caCert string
caPath string
clientCert string
@ -68,6 +66,7 @@ func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet {
if fs&FlagSetClient != 0 {
f.StringVar(&m.flagAddress, "address", "", "")
f.StringVar(&m.region, "region", "", "")
f.StringVar(&m.namespace, "namespace", "", "")
f.BoolVar(&m.noColor, "no-color", false, "")
f.StringVar(&m.caCert, "ca-cert", "", "")
f.StringVar(&m.caPath, "ca-path", "", "")
@ -103,6 +102,7 @@ func (m *Meta) AutocompleteFlags(fs FlagSetFlags) complete.Flags {
return complete.Flags{
"-address": complete.PredictAnything,
"-region": complete.PredictAnything,
"-namespace": NamespacePredictor(m.Client, nil),
"-no-color": complete.PredictNothing,
"-ca-cert": complete.PredictFiles("*"),
"-ca-path": complete.PredictDirs("*"),
@ -113,22 +113,23 @@ func (m *Meta) AutocompleteFlags(fs FlagSetFlags) complete.Flags {
}
}
// ApiClientFactory is the signature of an API client factory
type ApiClientFactory func() (*api.Client, error)
// Client is used to initialize and return a new API client using
// the default command line arguments and env vars.
func (m *Meta) Client() (*api.Client, error) {
config := api.DefaultConfig()
if v := os.Getenv(EnvNomadAddress); v != "" {
config.Address = v
}
if m.flagAddress != "" {
config.Address = m.flagAddress
}
if v := os.Getenv(EnvNomadRegion); v != "" {
config.Region = v
}
if m.region != "" {
config.Region = m.region
}
if m.namespace != "" {
config.Namespace = m.namespace
}
// If we need custom TLS configuration, then set it
if m.caCert != "" || m.caPath != "" || m.clientCert != "" || m.clientKey != "" || m.insecure {
t := &api.TLSConfig{
@ -165,6 +166,11 @@ func generalOptionsUsage() string {
Overrides the NOMAD_REGION environment variable if set.
Defaults to the Agent's local region.
-namespace=<namespace>
The target namespace for queries and actions bound to a namespace.
Overrides the NOMAD_NAMESPACE environment variable if set.
Defaults to the "default" namespace.
-no-color
Disables colored command output.

View File

@ -23,6 +23,7 @@ func TestMeta_FlagSet(t *testing.T) {
"address",
"no-color",
"region",
"namespace",
"ca-cert",
"ca-path",
"client-cert",

command/namespace.go (new file, 52 lines)
View File

@ -0,0 +1,52 @@
package command
import (
"github.com/hashicorp/nomad/api/contexts"
"github.com/mitchellh/cli"
"github.com/posener/complete"
)
type NamespaceCommand struct {
Meta
}
func (f *NamespaceCommand) Help() string {
return "This command is accessed by using one of the subcommands below."
}
func (f *NamespaceCommand) Synopsis() string {
return "Interact with namespaces"
}
func (f *NamespaceCommand) Run(args []string) int {
return cli.RunResultHelp
}
// NamespacePredictor returns a namespace predictor that can optionally filter
// specific namespaces
func NamespacePredictor(factory ApiClientFactory, filter map[string]struct{}) complete.Predictor {
return complete.PredictFunc(func(a complete.Args) []string {
client, err := factory()
if err != nil {
return nil
}
resp, _, err := client.Search().PrefixSearch(a.Last, contexts.Namespaces, nil)
if err != nil {
return []string{}
}
// Filter the returned namespaces. We assign the unfiltered slice to the
// filtered slice but with no elements. This causes the slices to share
// the underlying array and makes the filtering allocation free.
unfiltered := resp.Matches[contexts.Namespaces]
filtered := unfiltered[:0]
for _, ns := range unfiltered {
if _, ok := filter[ns]; !ok {
filtered = append(filtered, ns)
}
}
return filtered
})
}
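The predictor filters namespaces by reslicing the result to zero length and appending survivors, so the filtered slice reuses the backing array and the filtering itself allocates nothing. A standalone sketch of the idiom (illustrative, not part of the commit):

```go
package main

import "fmt"

// filterInPlace demonstrates the idiom used by NamespacePredictor: reslicing
// to length zero keeps the backing array, so appending the kept elements
// filters without allocating a second slice.
func filterInPlace(unfiltered []string, drop map[string]struct{}) []string {
	filtered := unfiltered[:0]
	for _, v := range unfiltered {
		if _, ok := drop[v]; !ok {
			filtered = append(filtered, v)
		}
	}
	return filtered
}

func main() {
	names := []string{"default", "dev", "qa"}
	kept := filterInPlace(names, map[string]struct{}{"default": {}})
	fmt.Println(kept) // [dev qa]
}
```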

View File

@ -0,0 +1,97 @@
package command
import (
"fmt"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type NamespaceApplyCommand struct {
Meta
}
func (c *NamespaceApplyCommand) Help() string {
helpText := `
Usage: nomad namespace apply [options]
Apply is used to create or update a namespace.
General Options:
` + generalOptionsUsage() + `
Apply Options:
-name
The name of the namespace.
-description
An optional description for the namespace.
`
return strings.TrimSpace(helpText)
}
func (c *NamespaceApplyCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-name": complete.PredictAnything,
"-description": complete.PredictAnything,
})
}
func (c *NamespaceApplyCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
}
func (c *NamespaceApplyCommand) Synopsis() string {
return "Create or update a namespace"
}
func (c *NamespaceApplyCommand) Run(args []string) int {
var name, description string
flags := c.Meta.FlagSet("namespace apply", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.StringVar(&name, "name", "", "")
flags.StringVar(&description, "description", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got no arguments
args = flags.Args()
if l := len(args); l != 0 {
c.Ui.Error(c.Help())
return 1
}
// Validate we have at least a name
if name == "" {
c.Ui.Error("Namespace name required")
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Create the request object.
ns := &api.Namespace{
Name: name,
Description: description,
}
_, err = client.Namespaces().Register(ns, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error applying namespace: %s", err))
return 1
}
return 0
}

View File

@ -0,0 +1,60 @@
// +build pro ent
package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
"github.com/stretchr/testify/assert"
)
func TestNamespaceApplyCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &NamespaceApplyCommand{}
}
func TestNamespaceApplyCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &NamespaceApplyCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "name required") {
t.Fatalf("name required error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestNamespaceApplyCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceApplyCommand{Meta: Meta{Ui: ui}}
// Create a namespace
name, desc := "foo", "bar"
if code := cmd.Run([]string{"-address=" + url, "-name=" + name, "-description=" + desc}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
namespaces, _, err := client.Namespaces().List(nil)
assert.Nil(t, err)
assert.Len(t, namespaces, 2)
}

View File

@ -0,0 +1,71 @@
package command
import (
"fmt"
"strings"
"github.com/posener/complete"
)
type NamespaceDeleteCommand struct {
Meta
}
func (c *NamespaceDeleteCommand) Help() string {
helpText := `
Usage: nomad namespace delete [options] <namespace>
Delete is used to remove a namespace.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *NamespaceDeleteCommand) AutocompleteFlags() complete.Flags {
return c.Meta.AutocompleteFlags(FlagSetClient)
}
func (c *NamespaceDeleteCommand) AutocompleteArgs() complete.Predictor {
filter := map[string]struct{}{"default": struct{}{}}
return NamespacePredictor(c.Meta.Client, filter)
}
func (c *NamespaceDeleteCommand) Synopsis() string {
return "Delete a namespace"
}
func (c *NamespaceDeleteCommand) Run(args []string) int {
flags := c.Meta.FlagSet("namespace delete", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got one argument
args = flags.Args()
if l := len(args); l != 1 {
c.Ui.Error(c.Help())
return 1
}
namespace := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
_, err = client.Namespaces().Delete(namespace, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deleting namespace: %s", err))
return 1
}
return 0
}

View File

@ -0,0 +1,93 @@
// +build pro ent
package command
import (
"strings"
"testing"
"github.com/hashicorp/nomad/api"
"github.com/mitchellh/cli"
"github.com/posener/complete"
"github.com/stretchr/testify/assert"
)
func TestNamespaceDeleteCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &NamespaceDeleteCommand{}
}
func TestNamespaceDeleteCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &NamespaceDeleteCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope", "foo"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "deleting namespace") {
t.Fatalf("connection error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestNamespaceDeleteCommand_Good(t *testing.T) {
t.Parallel()
// Create a server
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceDeleteCommand{Meta: Meta{Ui: ui}}
// Create a namespace to delete
ns := &api.Namespace{
Name: "foo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(t, err)
// Delete a namespace
if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
namespaces, _, err := client.Namespaces().List(nil)
assert.Nil(t, err)
assert.Len(t, namespaces, 1)
}
func TestNamespaceDeleteCommand_AutocompleteArgs(t *testing.T) {
assert := assert.New(t)
t.Parallel()
srv, client, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceDeleteCommand{Meta: Meta{Ui: ui, flagAddress: url}}
// Create a namespace other than default
ns := &api.Namespace{
Name: "diddo",
}
_, err := client.Namespaces().Register(ns, nil)
assert.Nil(err)
args := complete.Args{Last: "d"}
predictor := cmd.AutocompleteArgs()
res := predictor.Predict(args)
assert.Equal(1, len(res))
assert.Equal(ns.Name, res[0])
}

command/namespace_list.go (new file, 113 lines)
View File

@ -0,0 +1,113 @@
package command
import (
"fmt"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/posener/complete"
)
type NamespaceListCommand struct {
Meta
}
func (c *NamespaceListCommand) Help() string {
helpText := `
Usage: nomad namespace list [options]
List is used to list available namespaces.
General Options:
` + generalOptionsUsage() + `
List Options:
-json
Output the namespaces in a JSON format.
-t
Format and display the namespaces using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *NamespaceListCommand) AutocompleteFlags() complete.Flags {
return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient),
complete.Flags{
"-json": complete.PredictNothing,
"-t": complete.PredictAnything,
})
}
func (c *NamespaceListCommand) AutocompleteArgs() complete.Predictor {
return complete.PredictNothing
}
func (c *NamespaceListCommand) Synopsis() string {
return "List namespaces"
}
func (c *NamespaceListCommand) Run(args []string) int {
var json bool
var tmpl string
flags := c.Meta.FlagSet("namespace list", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&json, "json", false, "")
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got no arguments
args = flags.Args()
if l := len(args); l != 0 {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
namespaces, _, err := client.Namespaces().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error retrieving namespaces: %s", err))
return 1
}
if json || len(tmpl) > 0 {
out, err := Format(json, tmpl, namespaces)
if err != nil {
c.Ui.Error(err.Error())
return 1
}
c.Ui.Output(out)
return 0
}
c.Ui.Output(formatNamespaces(namespaces))
return 0
}
func formatNamespaces(namespaces []*api.Namespace) string {
if len(namespaces) == 0 {
return "No namespaces found"
}
rows := make([]string, len(namespaces)+1)
rows[0] = "Name|Description"
for i, ns := range namespaces {
rows[i+1] = fmt.Sprintf("%s|%s",
ns.Name,
ns.Description)
}
return formatList(rows)
}

View File

@ -0,0 +1,70 @@
// +build pro ent
package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
)
func TestNamespaceListCommand_Implements(t *testing.T) {
t.Parallel()
var _ cli.Command = &NamespaceListCommand{}
}
func TestNamespaceListCommand_Fails(t *testing.T) {
t.Parallel()
ui := new(cli.MockUi)
cmd := &NamespaceListCommand{Meta: Meta{Ui: ui}}
// Fails on misuse
if code := cmd.Run([]string{"some", "bad", "args"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, cmd.Help()) {
t.Fatalf("expected help output, got: %s", out)
}
ui.ErrorWriter.Reset()
if code := cmd.Run([]string{"-address=nope"}); code != 1 {
t.Fatalf("expected exit code 1, got: %d", code)
}
if out := ui.ErrorWriter.String(); !strings.Contains(out, "Error retrieving namespaces") {
t.Fatalf("expected failed query error, got: %s", out)
}
ui.ErrorWriter.Reset()
}
func TestNamespaceListCommand_List(t *testing.T) {
t.Parallel()
// Create a server
srv, _, url := testServer(t, true, nil)
defer srv.Shutdown()
ui := new(cli.MockUi)
cmd := &NamespaceListCommand{Meta: Meta{Ui: ui}}
// List should contain the default namespace
if code := cmd.Run([]string{"-address=" + url}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out := ui.OutputWriter.String()
if !strings.Contains(out, "default") || !strings.Contains(out, "Default shared namespace") {
t.Fatalf("expected default namespace, got: %s", out)
}
ui.OutputWriter.Reset()
// List json
t.Log(url)
if code := cmd.Run([]string{"-address=" + url, "-json"}); code != 0 {
t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String())
}
out = ui.OutputWriter.String()
if !strings.Contains(out, "CreateIndex") {
t.Fatalf("expected json output, got: %s", out)
}
ui.OutputWriter.Reset()
}

View File

@ -129,6 +129,11 @@ func (c *PlanCommand) Run(args []string) int {
client.SetRegion(*r)
}
// Force the namespace to be that of the job.
if n := job.Namespace; n != nil {
client.SetNamespace(*n)
}
// Submit the job
resp, _, err := client.Jobs().Plan(job, diff, nil)
if err != nil {

View File

@ -167,6 +167,11 @@ func (c *RunCommand) Run(args []string) int {
client.SetRegion(*r)
}
// Force the namespace to be that of the job.
if n := job.Namespace; n != nil {
client.SetNamespace(*n)
}
// Check if the job is periodic or is a parameterized job
periodic := job.IsPeriodic()
paramjob := job.IsParameterized()

View File

@ -163,6 +163,26 @@ func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
Meta: meta,
}, nil
},
"namespace": func() (cli.Command, error) {
return &command.NamespaceCommand{
Meta: meta,
}, nil
},
"namespace apply": func() (cli.Command, error) {
return &command.NamespaceApplyCommand{
Meta: meta,
}, nil
},
"namespace delete": func() (cli.Command, error) {
return &command.NamespaceDeleteCommand{
Meta: meta,
}, nil
},
"namespace list": func() (cli.Command, error) {
return &command.NamespaceListCommand{
Meta: meta,
}, nil
},
"node-drain": func() (cli.Command, error) {
return &command.NodeDrainCommand{
Meta: meta,

View File

@ -136,6 +136,7 @@ func parseJob(result *api.Job, list *ast.ObjectList) error {
"id",
"meta",
"name",
"namespace",
"periodic",
"priority",
"region",

View File

@ -31,6 +31,7 @@ func TestParse(t *testing.T) {
AllAtOnce: helper.BoolToPtr(true),
Datacenters: []string{"us2", "eu1"},
Region: helper.StringToPtr("fooregion"),
Namespace: helper.StringToPtr("foonamespace"),
VaultToken: helper.StringToPtr("foo"),
Meta: map[string]string{

View File

@ -1,5 +1,6 @@
job "binstore-storagelocker" {
region = "fooregion"
namespace = "foonamespace"
type = "batch"
priority = 52
all_at_once = true

View File

@ -35,6 +35,7 @@ func RunCustom(args []string, commands map[string]cli.CommandFactory) int {
case "executor":
case "fs ls", "fs cat", "fs stat":
case "job deployments", "job dispatch", "job history", "job promote", "job revert":
case "namespace list", "namespace delete", "namespace apply":
case "operator raft", "operator raft list-peers", "operator raft remove-peer":
case "syslog":
default:

View File

@ -30,9 +30,9 @@ func (a *Alloc) List(args *structs.AllocListRequest, reply *structs.AllocListRes
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.AllocsByIDPrefix(ws, prefix)
iter, err = state.AllocsByIDPrefix(ws, args.RequestNamespace(), prefix)
} else {
iter, err = state.Allocs(ws)
iter, err = state.AllocsByNamespace(ws, args.RequestNamespace())
}
if err != nil {
return err

View File

@ -32,7 +32,10 @@ func TestAllocEndpoint_List(t *testing.T) {
// Lookup the allocations
get := &structs.AllocListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.AllocListResponse
if err := msgpackrpc.CallWithCodec(codec, "Alloc.List", get, &resp); err != nil {
@ -51,7 +54,11 @@ func TestAllocEndpoint_List(t *testing.T) {
// Lookup the allocations by prefix
get = &structs.AllocListRequest{
QueryOptions: structs.QueryOptions{Region: "global", Prefix: alloc.ID[:4]},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
Prefix: alloc.ID[:4],
},
}
var resp2 structs.AllocListResponse
@ -95,6 +102,7 @@ func TestAllocEndpoint_List_Blocking(t *testing.T) {
req := &structs.AllocListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
MinQueryIndex: 1,
},
}

View File

@ -95,7 +95,8 @@ func (c *CoreScheduler) jobGC(eval *structs.Evaluation) error {
}
// Collect the allocations, evaluations and jobs to GC
var gcAlloc, gcEval, gcJob []string
var gcAlloc, gcEval []string
var gcJob []*structs.Job
OUTER:
for i := iter.Next(); i != nil; i = iter.Next() {
@ -107,7 +108,7 @@ OUTER:
}
ws := memdb.NewWatchSet()
evals, err := c.snap.EvalsByJob(ws, job.ID)
evals, err := c.snap.EvalsByJob(ws, job.Namespace, job.ID)
if err != nil {
c.srv.logger.Printf("[ERR] sched.core: failed to get evals for job %s: %v", job.ID, err)
continue
@ -132,7 +133,7 @@ OUTER:
// Job is eligible for garbage collection
if allEvalsGC {
gcJob = append(gcJob, job.ID)
gcJob = append(gcJob, job)
gcAlloc = append(gcAlloc, jobAlloc...)
gcEval = append(gcEval, jobEval...)
}
@ -153,10 +154,11 @@ OUTER:
// Call to the leader to deregister the jobs.
for _, job := range gcJob {
req := structs.JobDeregisterRequest{
JobID: job,
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Region: c.srv.config.Region,
Namespace: job.Namespace,
},
}
var resp structs.JobDeregisterResponse
@ -244,7 +246,7 @@ func (c *CoreScheduler) gcEval(eval *structs.Evaluation, thresholdIndex uint64,
// allocations.
if eval.Type == structs.JobTypeBatch {
// Check if the job is running
job, err := c.snap.JobByID(ws, eval.JobID)
job, err := c.snap.JobByID(ws, eval.Namespace, eval.JobID)
if err != nil {
return false, nil, err
}
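Collecting whole job objects instead of bare IDs is what lets the garbage collector address each deregister request to the job's own namespace. A stand-in sketch of that request construction, with simplified types in place of the structs package:

```go
package main

import "fmt"

// job and jobDeregisterRequest are stand-ins for the structs types; keeping
// the whole job around (rather than just its ID) is what makes the namespace
// available when the deregister request is built.
type job struct {
	ID        string
	Namespace string
}

type jobDeregisterRequest struct {
	JobID     string
	Purge     bool
	Region    string
	Namespace string
}

func main() {
	gcJob := []*job{
		{ID: "batch-1", Namespace: "default"},
		{ID: "batch-2", Namespace: "dev"},
	}

	for _, j := range gcJob {
		req := jobDeregisterRequest{
			JobID:     j.ID,
			Purge:     true,
			Region:    "global",
			Namespace: j.Namespace,
		}
		fmt.Printf("%+v\n", req)
	}
}
```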

View File

@ -184,7 +184,7 @@ func TestCoreScheduler_EvalGC_Batch(t *testing.T) {
t.Fatalf("bad: %v", outA2)
}
outB, err := state.JobByID(ws, job.ID)
outB, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -698,7 +698,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -744,7 +744,7 @@ func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) {
}
// Should not still exist
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -835,7 +835,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -881,7 +881,7 @@ func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) {
}
// Should not still exist
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -979,7 +979,7 @@ func TestCoreScheduler_JobGC_OneShot(t *testing.T) {
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1084,7 +1084,7 @@ func TestCoreScheduler_JobGC_Stopped(t *testing.T) {
// Shouldn't still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1161,7 +1161,7 @@ func TestCoreScheduler_JobGC_Force(t *testing.T) {
// Shouldn't still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1217,7 +1217,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1248,7 +1248,7 @@ func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
}
// Should not exist
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1292,7 +1292,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
// Should still exist
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1323,7 +1323,7 @@ func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
}
// Should not exist
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -224,9 +224,9 @@ func (d *Deployment) List(args *structs.DeploymentListRequest, reply *structs.De
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.DeploymentsByIDPrefix(ws, prefix)
iter, err = state.DeploymentsByIDPrefix(ws, args.RequestNamespace(), prefix)
} else {
iter, err = state.Deployments(ws)
iter, err = state.DeploymentsByNamespace(ws, args.RequestNamespace())
}
if err != nil {
return err

View File

@ -33,7 +33,10 @@ func TestDeploymentEndpoint_GetDeployment(t *testing.T) {
// Lookup the deployments
get := &structs.DeploymentSpecificRequest{
DeploymentID: d.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SingleDeploymentResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Deployment.GetDeployment", get, &resp), "RPC")
@ -76,6 +79,7 @@ func TestDeploymentEndpoint_GetDeployment_Blocking(t *testing.T) {
DeploymentID: d2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
MinQueryIndex: 150,
},
}
@ -207,7 +211,7 @@ func TestDeploymentEndpoint_Fail_Rollback(t *testing.T) {
assert.Equal(resp.DeploymentModifyIndex, dout.ModifyIndex, "wrong modify index")
// Lookup the job
jout, err := state.JobByID(ws, j.ID)
jout, err := state.JobByID(ws, j.Namespace, j.ID)
assert.Nil(err, "JobByID")
assert.NotNil(jout, "job")
assert.EqualValues(2, jout.Version, "reverted job version")
@ -467,7 +471,7 @@ func TestDeploymentEndpoint_SetAllocHealth_Rollback(t *testing.T) {
assert.False(*aout.DeploymentStatus.Healthy, "alloc deployment healthy")
// Lookup the job
jout, err := state.JobByID(ws, j.ID)
jout, err := state.JobByID(ws, j.Namespace, j.ID)
assert.Nil(err, "JobByID")
assert.NotNil(jout, "job")
assert.EqualValues(2, jout.Version, "reverted job version")
@ -492,7 +496,10 @@ func TestDeploymentEndpoint_List(t *testing.T) {
// Lookup the deployments
get := &structs.DeploymentListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.DeploymentListResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Deployment.List", get, &resp), "RPC")
@ -502,7 +509,11 @@ func TestDeploymentEndpoint_List(t *testing.T) {
// Lookup the deploys by prefix
get = &structs.DeploymentListRequest{
QueryOptions: structs.QueryOptions{Region: "global", Prefix: d.ID[:4]},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
Prefix: d.ID[:4],
},
}
var resp2 structs.DeploymentListResponse
@ -536,6 +547,7 @@ func TestDeploymentEndpoint_List_Blocking(t *testing.T) {
req := &structs.DeploymentListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
MinQueryIndex: 1,
},
}
@ -593,7 +605,10 @@ func TestDeploymentEndpoint_Allocations(t *testing.T) {
// Lookup the allocations
get := &structs.DeploymentSpecificRequest{
DeploymentID: d.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.AllocListResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Deployment.Allocations", get, &resp), "RPC")
@ -632,6 +647,7 @@ func TestDeploymentEndpoint_Allocations_Blocking(t *testing.T) {
DeploymentID: d.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
MinQueryIndex: 1,
},
}

View File

@ -156,6 +156,9 @@ func (w *deploymentWatcher) SetAllocHealth(
u = w.getDeploymentStatusUpdate(structs.DeploymentStatusFailed, desc)
}
// Canonicalize the job in case it doesn't have a namespace set
j.Canonicalize()
// Create the request
areq := &structs.ApplyDeploymentAllocHealthRequest{
DeploymentAllocHealthRequest: *req,
@ -396,7 +399,7 @@ func (w *deploymentWatcher) latestStableJob() (*structs.Job, error) {
return nil, err
}
versions, err := snap.JobVersionsByID(nil, w.d.JobID)
versions, err := snap.JobVersionsByID(nil, w.d.Namespace, w.d.JobID)
if err != nil {
return nil, err
}
@ -443,6 +446,7 @@ func (w *deploymentWatcher) createEvalBatched(forIndex uint64) {
func (w *deploymentWatcher) getEval() *structs.Evaluation {
return &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: w.j.Namespace,
Priority: w.j.Priority,
Type: w.j.Type,
TriggeredBy: structs.EvalTriggerDeploymentWatcher,
@ -514,7 +518,7 @@ func (w *deploymentWatcher) latestEvalIndex() (uint64, error) {
return 0, err
}
evals, err := snap.EvalsByJob(nil, w.d.JobID)
evals, err := snap.EvalsByJob(nil, w.d.Namespace, w.d.JobID)
if err != nil {
return 0, err
}

View File

@ -241,7 +241,7 @@ func (w *Watcher) addLocked(d *structs.Deployment) (*deploymentWatcher, error) {
return nil, err
}
job, err := snap.JobByID(nil, d.JobID)
job, err := snap.JobByID(nil, d.Namespace, d.JobID)
if err != nil {
return nil, err
}

View File

@ -664,7 +664,7 @@ func TestDeploymentWatcher_Watch(t *testing.T) {
// Wait for there to be one eval
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
evals, err := m.state.EvalsByJob(ws, j.ID)
evals, err := m.state.EvalsByJob(ws, j.Namespace, j.ID)
if err != nil {
return false, err
}
@ -702,7 +702,7 @@ func TestDeploymentWatcher_Watch(t *testing.T) {
// Wait for there to be one eval
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
evals, err := m.state.EvalsByJob(ws, j.ID)
evals, err := m.state.EvalsByJob(ws, j.Namespace, j.ID)
if err != nil {
return false, err
}
@ -789,12 +789,12 @@ func TestWatcher_BatchEvals(t *testing.T) {
// Wait for there to be one eval for each job
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
evals1, err := m.state.EvalsByJob(ws, j1.ID)
evals1, err := m.state.EvalsByJob(ws, j1.Namespace, j1.ID)
if err != nil {
return false, err
}
evals2, err := m.state.EvalsByJob(ws, j2.ID)
evals2, err := m.state.EvalsByJob(ws, j2.Namespace, j2.ID)
if err != nil {
return false, err
}

nomad/endpoints_oss.go (new file, 15 lines)
View File

@ -0,0 +1,15 @@
// +build !pro,!ent
package nomad
// EnterpriseEndpoints holds the set of enterprise only endpoints to register
type EnterpriseEndpoints struct{}
// NewEnterpriseEndpoints returns a stub of the enterprise endpoints since there
// are none in oss
func NewEnterpriseEndpoints(s *Server) *EnterpriseEndpoints {
return &EnterpriseEndpoints{}
}
// Register is a no-op in oss.
func (e *EnterpriseEndpoints) Register(s *Server) {}
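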

View File

@ -52,8 +52,8 @@ type EvalBroker struct {
// and is used to eventually fail an evaluation.
evals map[string]int
// jobEvals tracks queued evaluations by JobID to serialize them
jobEvals map[string]string
// jobEvals tracks queued evaluations by a job's ID and namespace to serialize them
jobEvals map[structs.NamespacedID]string
// blocked tracks the blocked evaluations by JobID in a priority queue
blocked map[string]PendingEvaluations
@ -117,7 +117,7 @@ func NewEvalBroker(timeout, initialNackDelay, subsequentNackDelay time.Duration,
enabled: false,
stats: new(BrokerStats),
evals: make(map[string]int),
jobEvals: make(map[string]string),
jobEvals: make(map[structs.NamespacedID]string),
blocked: make(map[string]PendingEvaluations),
ready: make(map[string]PendingEvaluations),
unack: make(map[string]*unackEval),
@ -235,9 +235,13 @@ func (b *EvalBroker) enqueueLocked(eval *structs.Evaluation, queue string) {
}
// Check if there is an evaluation for this JobID pending
pendingEval := b.jobEvals[eval.JobID]
tuple := structs.NamespacedID{
ID: eval.JobID,
Namespace: eval.Namespace,
}
pendingEval := b.jobEvals[tuple]
if pendingEval == "" {
b.jobEvals[eval.JobID] = eval.ID
b.jobEvals[tuple] = eval.ID
} else if pendingEval != eval.ID {
blocked := b.blocked[eval.JobID]
heap.Push(&blocked, eval)
@ -513,7 +517,12 @@ func (b *EvalBroker) Ack(evalID, token string) error {
// Cleanup
delete(b.unack, evalID)
delete(b.evals, evalID)
delete(b.jobEvals, jobID)
tuple := structs.NamespacedID{
ID: jobID,
Namespace: unack.Eval.Namespace,
}
delete(b.jobEvals, tuple)
// Check if there are any blocked evaluations
if blocked := b.blocked[jobID]; len(blocked) != 0 {
@ -660,7 +669,7 @@ func (b *EvalBroker) Flush() {
b.stats.TotalWaiting = 0
b.stats.ByScheduler = make(map[string]*SchedulerStats)
b.evals = make(map[string]int)
b.jobEvals = make(map[string]string)
b.jobEvals = make(map[structs.NamespacedID]string)
b.blocked = make(map[string]PendingEvaluations)
b.ready = make(map[string]PendingEvaluations)
b.unack = make(map[string]*unackEval)
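The broker change above swaps the plain job-ID key for a namespace-aware tuple, so two jobs that share an ID but live in different namespaces no longer serialize against each other. A minimal sketch of that keying scheme, using a stand-in NamespacedID struct rather than the real structs package:

```go
package main

import "fmt"

// NamespacedID mirrors the tuple the broker now keys on; this is an
// illustrative sketch, not the broker implementation itself.
type NamespacedID struct {
	ID        string
	Namespace string
}

func main() {
	// jobEvals: one in-flight evaluation per (namespace, job ID).
	jobEvals := make(map[NamespacedID]string)

	enqueue := func(ns, jobID, evalID string) bool {
		tuple := NamespacedID{ID: jobID, Namespace: ns}
		if _, pending := jobEvals[tuple]; pending {
			return false // would be pushed onto the blocked heap
		}
		jobEvals[tuple] = evalID
		return true // ready for dequeue
	}

	fmt.Println(enqueue("namespace-one", "example", "eval-1")) // true
	fmt.Println(enqueue("namespace-one", "example", "eval-2")) // false, same tuple
	fmt.Println(enqueue("namespace-two", "example", "eval-3")) // true, different namespace
}
```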

View File

@ -387,24 +387,41 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
b := testBroker(t, 0)
b.SetEnabled(true)
ns1 := "namespace-one"
ns2 := "namespace-two"
eval := mock.Eval()
eval.Namespace = ns1
b.Enqueue(eval)
eval2 := mock.Eval()
eval2.JobID = eval.JobID
eval2.Namespace = ns1
eval2.CreateIndex = eval.CreateIndex + 1
b.Enqueue(eval2)
eval3 := mock.Eval()
eval3.JobID = eval.JobID
eval3.Namespace = ns1
eval3.CreateIndex = eval.CreateIndex + 2
b.Enqueue(eval3)
eval4 := mock.Eval()
eval4.JobID = eval.JobID
eval4.Namespace = ns2
eval4.CreateIndex = eval.CreateIndex + 3
b.Enqueue(eval4)
eval5 := mock.Eval()
eval5.JobID = eval.JobID
eval5.Namespace = ns2
eval5.CreateIndex = eval.CreateIndex + 4
b.Enqueue(eval5)
stats := b.Stats()
if stats.TotalReady != 1 {
if stats.TotalReady != 2 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 2 {
if stats.TotalBlocked != 3 {
t.Fatalf("bad: %#v", stats)
}
@ -419,13 +436,13 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
// Check the stats
stats = b.Stats()
if stats.TotalReady != 0 {
if stats.TotalReady != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 2 {
if stats.TotalBlocked != 3 {
t.Fatalf("bad: %#v", stats)
}
@ -437,13 +454,13 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
// Check the stats
stats = b.Stats()
if stats.TotalReady != 1 {
if stats.TotalReady != 2 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 0 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 1 {
if stats.TotalBlocked != 2 {
t.Fatalf("bad: %#v", stats)
}
@ -456,6 +473,84 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
t.Fatalf("bad : %#v", out)
}
// Check the stats
stats = b.Stats()
if stats.TotalReady != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 2 {
t.Fatalf("bad: %#v", stats)
}
// Ack out
err = b.Ack(eval2.ID, token)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the stats
stats = b.Stats()
if stats.TotalReady != 2 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 0 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 1 {
t.Fatalf("bad: %#v", stats)
}
// Dequeue should work
out, token, err = b.Dequeue(defaultSched, time.Second)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != eval3 {
t.Fatalf("bad : %#v", out)
}
// Check the stats
stats = b.Stats()
if stats.TotalReady != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 1 {
t.Fatalf("bad: %#v", stats)
}
// Ack out
err = b.Ack(eval3.ID, token)
if err != nil {
t.Fatalf("err: %v", err)
}
// Check the stats
stats = b.Stats()
if stats.TotalReady != 1 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalUnacked != 0 {
t.Fatalf("bad: %#v", stats)
}
if stats.TotalBlocked != 1 {
t.Fatalf("bad: %#v", stats)
}
// Dequeue should work
out, token, err = b.Dequeue(defaultSched, time.Second)
if err != nil {
t.Fatalf("err: %v", err)
}
if out != eval4 {
t.Fatalf("bad : %#v", out)
}
// Check the stats
stats = b.Stats()
if stats.TotalReady != 0 {
@ -469,7 +564,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
}
// Ack out
err = b.Ack(eval2.ID, token)
err = b.Ack(eval4.ID, token)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -491,7 +586,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
if err != nil {
t.Fatalf("err: %v", err)
}
if out != eval3 {
if out != eval5 {
t.Fatalf("bad : %#v", out)
}
@ -508,7 +603,7 @@ func TestEvalBroker_Serialize_DuplicateJobID(t *testing.T) {
}
// Ack out
err = b.Ack(eval3.ID, token)
err = b.Ack(eval5.ID, token)
if err != nil {
t.Fatalf("err: %v", err)
}

View File

@ -284,9 +284,9 @@ func (e *Eval) List(args *structs.EvalListRequest,
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.EvalsByIDPrefix(ws, prefix)
iter, err = state.EvalsByIDPrefix(ws, args.RequestNamespace(), prefix)
} else {
iter, err = state.Evals(ws)
iter, err = state.EvalsByNamespace(ws, args.RequestNamespace())
}
if err != nil {
return err

View File

@ -442,7 +442,10 @@ func TestEvalEndpoint_List(t *testing.T) {
// Lookup the eval
get := &structs.EvalListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.EvalListResponse
if err := msgpackrpc.CallWithCodec(codec, "Eval.List", get, &resp); err != nil {
@ -458,7 +461,11 @@ func TestEvalEndpoint_List(t *testing.T) {
// Lookup the eval by prefix
get = &structs.EvalListRequest{
QueryOptions: structs.QueryOptions{Region: "global", Prefix: "aaaabb"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
Prefix: "aaaabb",
},
}
var resp2 structs.EvalListResponse
if err := msgpackrpc.CallWithCodec(codec, "Eval.List", get, &resp2); err != nil {
@ -495,6 +502,7 @@ func TestEvalEndpoint_List_Blocking(t *testing.T) {
req := &structs.EvalListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
MinQueryIndex: 1,
},
}

View File

@ -45,6 +45,20 @@ const (
ACLTokenSnapshot
)
// LogApplier is the definition of a function that can apply a Raft log
type LogApplier func(buf []byte, index uint64) interface{}
// LogAppliers is a mapping of the Raft MessageType to the appropriate log
// applier
type LogAppliers map[structs.MessageType]LogApplier
// SnapshotRestorer is the definition of a function that can restore a snapshot object from a decoder
type SnapshotRestorer func(restore *state.StateRestore, dec *codec.Decoder) error
// SnapshotRestorers is a mapping of the SnapshotType to the appropriate
// snapshot restorer.
type SnapshotRestorers map[SnapshotType]SnapshotRestorer
// nomadFSM implements a finite state machine that is used
// along with Raft to provide strong consistency. We implement
// this outside the Server to avoid exposing this outside the package.
@ -57,6 +71,12 @@ type nomadFSM struct {
state *state.StateStore
timetable *TimeTable
// enterpriseAppliers holds the set of enterprise only LogAppliers
enterpriseAppliers LogAppliers
// enterpriseRestorers holds the set of enterprise only snapshot restorers
enterpriseRestorers SnapshotRestorers
// stateLock is only used to protect outside callers to State() from
// racing with Restore(), which is called by Raft (it puts in a totally
// new state store). Everything internal here is synchronized by the
@ -93,7 +113,16 @@ func NewFSM(evalBroker *EvalBroker, periodic *PeriodicDispatch,
logger: log.New(logOutput, "", log.LstdFlags),
state: state,
timetable: NewTimeTable(timeTableGranularity, timeTableLimit),
enterpriseAppliers: make(map[structs.MessageType]LogApplier, 8),
enterpriseRestorers: make(map[SnapshotType]SnapshotRestorer, 8),
}
// Register all the log applier functions
fsm.registerLogAppliers()
// Register all the snapshot restorer functions
fsm.registerSnapshotRestorers()
return fsm, nil
}
@ -179,14 +208,20 @@ func (n *nomadFSM) Apply(log *raft.Log) interface{} {
return n.applyACLTokenDelete(buf[1:], log.Index)
case structs.ACLTokenBootstrapRequestType:
return n.applyACLTokenBootstrap(buf[1:], log.Index)
default:
}
// Check enterprise only message types.
if applier, ok := n.enterpriseAppliers[msgType]; ok {
return applier(buf[1:], log.Index)
}
// We didn't match anything, either panic or ignore
if ignoreUnknown {
n.logger.Printf("[WARN] nomad.fsm: ignoring unknown message type (%d), upgrade to newer version", msgType)
return nil
} else {
}
panic(fmt.Errorf("failed to apply request: %#v", buf))
}
}
}
func (n *nomadFSM) applyUpsertNode(buf []byte, index uint64) interface{} {
@ -304,7 +339,7 @@ func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
// job was not launched. In this case, we use the insertion time to
// determine if a launch was missed.
if req.Job.IsPeriodic() {
prevLaunch, err := n.state.PeriodicLaunchByID(ws, req.Job.ID)
prevLaunch, err := n.state.PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
if err != nil {
n.logger.Printf("[ERR] nomad.fsm: PeriodicLaunchByID failed: %v", err)
return err
@ -313,7 +348,11 @@ func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
// Record the insertion time as a launch. We overload the launch table
// such that the first entry is the insertion time.
if prevLaunch == nil {
launch := &structs.PeriodicLaunch{ID: req.Job.ID, Launch: time.Now()}
launch := &structs.PeriodicLaunch{
ID: req.Job.ID,
Namespace: req.Namespace,
Launch: time.Now(),
}
if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
n.logger.Printf("[ERR] nomad.fsm: UpsertPeriodicLaunch failed: %v", err)
return err
@ -324,7 +363,7 @@ func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
// Check if the parent job is periodic and mark the launch time.
parentID := req.Job.ParentID
if parentID != "" {
parent, err := n.state.JobByID(ws, parentID)
parent, err := n.state.JobByID(ws, req.Namespace, parentID)
if err != nil {
n.logger.Printf("[ERR] nomad.fsm: JobByID(%v) lookup for parent failed: %v", parentID, err)
return err
@ -340,7 +379,11 @@ func (n *nomadFSM) applyUpsertJob(buf []byte, index uint64) interface{} {
return err
}
launch := &structs.PeriodicLaunch{ID: parentID, Launch: t}
launch := &structs.PeriodicLaunch{
ID: parentID,
Namespace: req.Namespace,
Launch: t,
}
if err := n.state.UpsertPeriodicLaunch(index, launch); err != nil {
n.logger.Printf("[ERR] nomad.fsm: UpsertPeriodicLaunch failed: %v", err)
return err
@ -359,13 +402,13 @@ func (n *nomadFSM) applyDeregisterJob(buf []byte, index uint64) interface{} {
}
// If it is periodic remove it from the dispatcher
if err := n.periodicDispatcher.Remove(req.JobID); err != nil {
if err := n.periodicDispatcher.Remove(req.Namespace, req.JobID); err != nil {
n.logger.Printf("[ERR] nomad.fsm: periodicDispatcher.Remove failed: %v", err)
return err
}
if req.Purge {
if err := n.state.DeleteJob(index, req.JobID); err != nil {
if err := n.state.DeleteJob(index, req.Namespace, req.JobID); err != nil {
n.logger.Printf("[ERR] nomad.fsm: DeleteJob failed: %v", err)
return err
}
@ -373,18 +416,18 @@ func (n *nomadFSM) applyDeregisterJob(buf []byte, index uint64) interface{} {
// We always delete from the periodic launch table because it is possible that
// the job was updated to be non-periodic, thus checking if it is periodic
// doesn't ensure we clean it up properly.
n.state.DeletePeriodicLaunch(index, req.JobID)
n.state.DeletePeriodicLaunch(index, req.Namespace, req.JobID)
} else {
// Get the current job and mark it as stopped and re-insert it.
ws := memdb.NewWatchSet()
current, err := n.state.JobByID(ws, req.JobID)
current, err := n.state.JobByID(ws, req.Namespace, req.JobID)
if err != nil {
n.logger.Printf("[ERR] nomad.fsm: JobByID lookup failed: %v", err)
return err
}
if current == nil {
return fmt.Errorf("job %q doesn't exist to be deregistered", req.JobID)
return fmt.Errorf("job %q in namespace %q doesn't exist to be deregistered", req.JobID, req.Namespace)
}
stopped := current.Copy()
@ -673,7 +716,7 @@ func (n *nomadFSM) applyJobStability(buf []byte, index uint64) interface{} {
panic(fmt.Errorf("failed to decode request: %v", err))
}
if err := n.state.UpdateJobStability(index, req.JobID, req.JobVersion, req.Stable); err != nil {
if err := n.state.UpdateJobStability(index, req.Namespace, req.JobID, req.JobVersion, req.Stable); err != nil {
n.logger.Printf("[ERR] nomad.fsm: UpdateJobStability failed: %v", err)
return err
}
@ -807,7 +850,8 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
}
// Decode
switch SnapshotType(msgType[0]) {
snapType := SnapshotType(msgType[0])
switch snapType {
case TimeTableSnapshot:
if err := n.timetable.Deserialize(dec); err != nil {
return fmt.Errorf("time table deserialize failed: %v", err)
@ -846,6 +890,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(eval); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if eval.Namespace == "" {
eval.Namespace = structs.DefaultNamespace
}
if err := restore.EvalRestore(eval); err != nil {
return err
}
@ -855,6 +905,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(alloc); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if alloc.Namespace == "" {
alloc.Namespace = structs.DefaultNamespace
}
if err := restore.AllocRestore(alloc); err != nil {
return err
}
@ -873,6 +929,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(launch); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if launch.Namespace == "" {
launch.Namespace = structs.DefaultNamespace
}
if err := restore.PeriodicLaunchRestore(launch); err != nil {
return err
}
@ -882,6 +944,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(summary); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if summary.Namespace == "" {
summary.Namespace = structs.DefaultNamespace
}
if err := restore.JobSummaryRestore(summary); err != nil {
return err
}
@ -900,6 +968,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(version); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if version.Namespace == "" {
version.Namespace = structs.DefaultNamespace
}
if err := restore.JobVersionRestore(version); err != nil {
return err
}
@ -909,6 +983,12 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
if err := dec.Decode(deployment); err != nil {
return err
}
// COMPAT: Handle upgrade to v0.7.0
if deployment.Namespace == "" {
deployment.Namespace = structs.DefaultNamespace
}
if err := restore.DeploymentRestore(deployment); err != nil {
return err
}
@ -932,8 +1012,17 @@ func (n *nomadFSM) Restore(old io.ReadCloser) error {
}
default:
// Check if this is an enterprise only object being restored
restorer, ok := n.enterpriseRestorers[snapType]
if !ok {
return fmt.Errorf("Unrecognized snapshot type: %v", msgType)
}
// Restore the enterprise only object
if err := restorer(restore, dec); err != nil {
return err
}
}
}
restore.Commit()
@ -1006,6 +1095,7 @@ func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
// Create an eval and mark it as requiring annotations and insert that as well
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: job.Namespace,
Priority: job.Priority,
Type: job.Type,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1026,7 +1116,7 @@ func (n *nomadFSM) reconcileQueuedAllocations(index uint64) error {
}
// Get the job summary from the fsm state store
originalSummary, err := n.state.JobSummaryByID(ws, job.ID)
originalSummary, err := n.state.JobSummaryByID(ws, job.Namespace, job.ID)
if err != nil {
return err
}
@ -1145,6 +1235,11 @@ func (s *nomadSnapshot) Persist(sink raft.SnapshotSink) error {
sink.Cancel()
return err
}
if err := s.persistEnterpriseTables(sink, encoder); err != nil {
sink.Cancel()
return err
}
return nil
}
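The repeated COMPAT blocks in Restore all apply the same rule: objects written before v0.7.0 carry no namespace, so they are placed into the default one. A small, self-contained sketch of that defaulting, with a stand-in type:

```go
package main

import "fmt"

const DefaultNamespace = "default"

// compatEval is a stand-in for the restored objects (evals, allocs, launches,
// summaries, versions, deployments) that may predate namespaces.
type compatEval struct {
	ID        string
	Namespace string
}

// canonicalizeNamespace applies the same upgrade rule the restore path uses:
// anything written before namespaces existed lands in the default namespace.
func canonicalizeNamespace(ns string) string {
	if ns == "" {
		return DefaultNamespace
	}
	return ns
}

func main() {
	old := compatEval{ID: "eval-from-v0.6"}
	old.Namespace = canonicalizeNamespace(old.Namespace)
	fmt.Println(old.Namespace) // default
}
```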

nomad/fsm_registry_oss.go (new file, 19 lines)
View File

@ -0,0 +1,19 @@
// +build !pro,!ent
package nomad
import (
"github.com/hashicorp/raft"
"github.com/ugorji/go/codec"
)
// registerLogAppliers is a no-op for open-source only FSMs.
func (n *nomadFSM) registerLogAppliers() {}
// registerSnapshotRestorers is a no-op for open-source only FSMs.
func (n *nomadFSM) registerSnapshotRestorers() {}
// persistEnterpriseTables is a no-op for open-source only FSMs.
func (s *nomadSnapshot) persistEnterpriseTables(sink raft.SnapshotSink, encoder *codec.Encoder) error {
return nil
}
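The open-source build leaves both registries empty; a pro or enterprise build would presumably populate the LogAppliers and SnapshotRestorers maps so that Apply and Restore can dispatch types the OSS switch does not know about. An illustrative, self-contained sketch of that dispatch-table pattern; NamespaceUpsertRequestType and the applier body are hypothetical, and the types stand in for the structs package.

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for structs.MessageType and the FSM's applier signature; this is
// a sketch of the dispatch-table pattern, not the Nomad FSM itself.
type MessageType uint8

type LogApplier func(buf []byte, index uint64) interface{}

const NamespaceUpsertRequestType MessageType = 64 // hypothetical enterprise type

func main() {
	appliers := map[MessageType]LogApplier{}

	// An enterprise-flavoured build would register its appliers here.
	appliers[NamespaceUpsertRequestType] = func(buf []byte, index uint64) interface{} {
		fmt.Printf("applied %d bytes at index %d\n", len(buf), index)
		return nil
	}

	apply := func(msgType MessageType, buf []byte, index uint64) interface{} {
		if applier, ok := appliers[msgType]; ok {
			return applier(buf, index)
		}
		return errors.New("unknown message type")
	}

	apply(NamespaceUpsertRequestType, []byte{0x1, 0x2}, 7)
	fmt.Println(apply(MessageType(200), nil, 8)) // unknown message type
}
```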

View File

@ -281,6 +281,9 @@ func TestFSM_RegisterJob(t *testing.T) {
job := mock.PeriodicJob()
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err := structs.Encode(structs.JobRegisterRequestType, req)
if err != nil {
@ -294,7 +297,7 @@ func TestFSM_RegisterJob(t *testing.T) {
// Verify we are registered
ws := memdb.NewWatchSet()
jobOut, err := fsm.State().JobByID(ws, req.Job.ID)
jobOut, err := fsm.State().JobByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -306,12 +309,16 @@ func TestFSM_RegisterJob(t *testing.T) {
}
// Verify it was added to the periodic runner.
if _, ok := fsm.periodicDispatcher.tracked[job.ID]; !ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := fsm.periodicDispatcher.tracked[tuple]; !ok {
t.Fatal("job not added to periodic runner")
}
// Verify the launch time was tracked.
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Job.ID)
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -330,6 +337,9 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) {
job := mock.PeriodicJob()
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err := structs.Encode(structs.JobRegisterRequestType, req)
if err != nil {
@ -344,6 +354,9 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) {
req2 := structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err = structs.Encode(structs.JobDeregisterRequestType, req2)
if err != nil {
@ -357,7 +370,7 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) {
// Verify we are NOT registered
ws := memdb.NewWatchSet()
jobOut, err := fsm.State().JobByID(ws, req.Job.ID)
jobOut, err := fsm.State().JobByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -366,12 +379,16 @@ func TestFSM_DeregisterJob_Purge(t *testing.T) {
}
// Verify it was removed from the periodic runner.
if _, ok := fsm.periodicDispatcher.tracked[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := fsm.periodicDispatcher.tracked[tuple]; ok {
t.Fatal("job not removed from periodic runner")
}
// Verify it was removed from the periodic launch table.
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Job.ID)
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -387,6 +404,9 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
job := mock.PeriodicJob()
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err := structs.Encode(structs.JobRegisterRequestType, req)
if err != nil {
@ -401,6 +421,9 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
req2 := structs.JobDeregisterRequest{
JobID: job.ID,
Purge: false,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err = structs.Encode(structs.JobDeregisterRequestType, req2)
if err != nil {
@ -414,7 +437,7 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
// Verify we are NOT registered
ws := memdb.NewWatchSet()
jobOut, err := fsm.State().JobByID(ws, req.Job.ID)
jobOut, err := fsm.State().JobByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -426,12 +449,16 @@ func TestFSM_DeregisterJob_NoPurge(t *testing.T) {
}
// Verify it was removed from the periodic runner.
if _, ok := fsm.periodicDispatcher.tracked[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := fsm.periodicDispatcher.tracked[tuple]; ok {
t.Fatal("job not removed from periodic runner")
}
// Verify it was removed from the periodic launch table.
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Job.ID)
launchOut, err := fsm.State().PeriodicLaunchByID(ws, req.Namespace, req.Job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1218,7 +1245,7 @@ func TestFSM_DeploymentStatusUpdate(t *testing.T) {
}
// Check that the job was created
jout, _ := state.JobByID(ws, j.ID)
jout, _ := state.JobByID(ws, j.Namespace, j.ID)
if err != nil {
t.Fatalf("bad: %v", err)
}
@ -1250,6 +1277,9 @@ func TestFSM_JobStabilityUpdate(t *testing.T) {
JobID: job.ID,
JobVersion: job.Version,
Stable: true,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
buf, err := structs.Encode(structs.JobStabilityRequestType, req)
if err != nil {
@ -1262,7 +1292,7 @@ func TestFSM_JobStabilityUpdate(t *testing.T) {
// Check that the stability was updated properly
ws := memdb.NewWatchSet()
jout, _ := state.JobByIDAndVersion(ws, job.ID, job.Version)
jout, _ := state.JobByIDAndVersion(ws, job.Namespace, job.ID, job.Version)
if err != nil {
t.Fatalf("bad: %v", err)
}
@ -1451,7 +1481,7 @@ func TestFSM_DeploymentAllocHealth(t *testing.T) {
}
// Check that the job was created
jout, _ := state.JobByID(ws, j.ID)
jout, _ := state.JobByID(ws, j.Namespace, j.ID)
if err != nil {
t.Fatalf("bad: %v", err)
}
@ -1725,8 +1755,8 @@ func TestFSM_SnapshotRestore_Jobs(t *testing.T) {
ws := memdb.NewWatchSet()
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
out1, _ := state2.JobByID(ws, job1.ID)
out2, _ := state2.JobByID(ws, job2.ID)
out1, _ := state2.JobByID(ws, job1.Namespace, job1.ID)
out2, _ := state2.JobByID(ws, job2.Namespace, job2.ID)
if !reflect.DeepEqual(job1, out1) {
t.Fatalf("bad: \n%#v\n%#v", out1, job1)
}
@ -1865,18 +1895,26 @@ func TestFSM_SnapshotRestore_PeriodicLaunches(t *testing.T) {
fsm := testFSM(t)
state := fsm.State()
job1 := mock.Job()
launch1 := &structs.PeriodicLaunch{ID: job1.ID, Launch: time.Now()}
launch1 := &structs.PeriodicLaunch{
ID: job1.ID,
Namespace: job1.Namespace,
Launch: time.Now(),
}
state.UpsertPeriodicLaunch(1000, launch1)
job2 := mock.Job()
launch2 := &structs.PeriodicLaunch{ID: job2.ID, Launch: time.Now()}
launch2 := &structs.PeriodicLaunch{
ID: job2.ID,
Namespace: job2.Namespace,
Launch: time.Now(),
}
state.UpsertPeriodicLaunch(1001, launch2)
// Verify the contents
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
ws := memdb.NewWatchSet()
out1, _ := state2.PeriodicLaunchByID(ws, launch1.ID)
out2, _ := state2.PeriodicLaunchByID(ws, launch2.ID)
out1, _ := state2.PeriodicLaunchByID(ws, launch1.Namespace, launch1.ID)
out2, _ := state2.PeriodicLaunchByID(ws, launch2.Namespace, launch2.ID)
if !cmp.Equal(launch1, out1) {
t.Fatalf("bad: %v", cmp.Diff(launch1, out1))
@ -1895,17 +1933,17 @@ func TestFSM_SnapshotRestore_JobSummary(t *testing.T) {
job1 := mock.Job()
state.UpsertJob(1000, job1)
ws := memdb.NewWatchSet()
js1, _ := state.JobSummaryByID(ws, job1.ID)
js1, _ := state.JobSummaryByID(ws, job1.Namespace, job1.ID)
job2 := mock.Job()
state.UpsertJob(1001, job2)
js2, _ := state.JobSummaryByID(ws, job2.ID)
js2, _ := state.JobSummaryByID(ws, job2.Namespace, job2.ID)
// Verify the contents
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
out1, _ := state2.JobSummaryByID(ws, job1.ID)
out2, _ := state2.JobSummaryByID(ws, job2.ID)
out1, _ := state2.JobSummaryByID(ws, job1.Namespace, job1.ID)
out2, _ := state2.JobSummaryByID(ws, job2.Namespace, job2.ID)
if !reflect.DeepEqual(js1, out1) {
t.Fatalf("bad: \n%#v\n%#v", js1, out1)
}
@ -1952,8 +1990,8 @@ func TestFSM_SnapshotRestore_JobVersions(t *testing.T) {
ws := memdb.NewWatchSet()
fsm2 := testSnapshotRestore(t, fsm)
state2 := fsm2.State()
out1, _ := state2.JobByIDAndVersion(ws, job1.ID, job1.Version)
out2, _ := state2.JobByIDAndVersion(ws, job2.ID, job2.Version)
out1, _ := state2.JobByIDAndVersion(ws, job1.Namespace, job1.ID, job1.Version)
out2, _ := state2.JobByIDAndVersion(ws, job2.Namespace, job2.ID, job2.Version)
if !reflect.DeepEqual(job1, out1) {
t.Fatalf("bad: \n%#v\n%#v", out1, job1)
}
@ -2039,7 +2077,7 @@ func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
state.UpsertAllocs(1011, []*structs.Allocation{alloc})
// Delete the summary
state.DeleteJobSummary(1040, alloc.Job.ID)
state.DeleteJobSummary(1040, alloc.Namespace, alloc.Job.ID)
// Delete the index
if err := state.RemoveIndex("job_summary"); err != nil {
@ -2051,9 +2089,10 @@ func TestFSM_SnapshotRestore_AddMissingSummary(t *testing.T) {
latestIndex, _ := state.LatestIndex()
ws := memdb.NewWatchSet()
out, _ := state2.JobSummaryByID(ws, alloc.Job.ID)
out, _ := state2.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
expected := structs.JobSummary{
JobID: alloc.Job.ID,
Namespace: alloc.Job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Starting: 1,
@ -2089,8 +2128,8 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
state.UpsertAllocs(1011, []*structs.Allocation{alloc})
// Delete the summaries
state.DeleteJobSummary(1030, job1.ID)
state.DeleteJobSummary(1040, alloc.Job.ID)
state.DeleteJobSummary(1030, job1.Namespace, job1.ID)
state.DeleteJobSummary(1040, alloc.Namespace, alloc.Job.ID)
req := structs.GenericRequest{}
buf, err := structs.Encode(structs.ReconcileJobSummariesRequestType, req)
@ -2104,9 +2143,10 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
}
ws := memdb.NewWatchSet()
out1, _ := state.JobSummaryByID(ws, job1.ID)
out1, _ := state.JobSummaryByID(ws, job1.Namespace, job1.ID)
expected := structs.JobSummary{
JobID: job1.ID,
Namespace: job1.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Queued: 10,
@ -2122,9 +2162,10 @@ func TestFSM_ReconcileSummaries(t *testing.T) {
// This exercises the code path which adds the allocations made by the
// planner and the number of unplaced allocations in the reconcile summaries
// codepath
out2, _ := state.JobSummaryByID(ws, alloc.Job.ID)
out2, _ := state.JobSummaryByID(ws, alloc.Namespace, alloc.Job.ID)
expected = structs.JobSummary{
JobID: alloc.Job.ID,
Namespace: alloc.Job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Queued: 9,

View File

@ -85,7 +85,7 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
return err
}
ws := memdb.NewWatchSet()
existingJob, err := snap.JobByID(ws, args.Job.ID)
existingJob, err := snap.JobByID(ws, args.RequestNamespace(), args.Job.ID)
if err != nil {
return err
}
@ -178,6 +178,7 @@ func (j *Job) Register(args *structs.JobRegisterRequest, reply *structs.JobRegis
// Create a new evaluation
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: args.RequestNamespace(),
Priority: args.Job.Priority,
Type: args.Job.Type,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -290,7 +291,7 @@ func (j *Job) Summary(args *structs.JobSummaryRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for job summary
out, err := state.JobSummaryByID(ws, args.JobID)
out, err := state.JobSummaryByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -364,7 +365,7 @@ func (j *Job) Revert(args *structs.JobRevertRequest, reply *structs.JobRegisterR
}
ws := memdb.NewWatchSet()
cur, err := snap.JobByID(ws, args.JobID)
cur, err := snap.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -375,12 +376,12 @@ func (j *Job) Revert(args *structs.JobRevertRequest, reply *structs.JobRegisterR
return fmt.Errorf("can't revert to current version")
}
jobV, err := snap.JobByIDAndVersion(ws, args.JobID, args.JobVersion)
jobV, err := snap.JobByIDAndVersion(ws, args.RequestNamespace(), args.JobID, args.JobVersion)
if err != nil {
return err
}
if jobV == nil {
return fmt.Errorf("job %q at version %d not found", args.JobID, args.JobVersion)
return fmt.Errorf("job %q in namespace %q at version %d not found", args.JobID, args.RequestNamespace(), args.JobVersion)
}
// Build the register request
@ -422,12 +423,12 @@ func (j *Job) Stable(args *structs.JobStabilityRequest, reply *structs.JobStabil
}
ws := memdb.NewWatchSet()
jobV, err := snap.JobByIDAndVersion(ws, args.JobID, args.JobVersion)
jobV, err := snap.JobByIDAndVersion(ws, args.RequestNamespace(), args.JobID, args.JobVersion)
if err != nil {
return err
}
if jobV == nil {
return fmt.Errorf("job %q at version %d not found", args.JobID, args.JobVersion)
return fmt.Errorf("job %q in namespace %q at version %d not found", args.JobID, args.RequestNamespace(), args.JobVersion)
}
// Commit this stability request via Raft
@ -460,7 +461,7 @@ func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegis
return err
}
ws := memdb.NewWatchSet()
job, err := snap.JobByID(ws, args.JobID)
job, err := snap.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -477,6 +478,7 @@ func (j *Job) Evaluate(args *structs.JobEvaluateRequest, reply *structs.JobRegis
// Create a new evaluation
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: args.RequestNamespace(),
Priority: job.Priority,
Type: job.Type,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -522,7 +524,7 @@ func (j *Job) Deregister(args *structs.JobDeregisterRequest, reply *structs.JobD
return err
}
ws := memdb.NewWatchSet()
job, err := snap.JobByID(ws, args.JobID)
job, err := snap.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -548,6 +550,7 @@ func (j *Job) Deregister(args *structs.JobDeregisterRequest, reply *structs.JobD
// since all should be able to handle deregistration in the same way.
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: args.RequestNamespace(),
Priority: structs.JobDefaultPriority,
Type: structs.JobTypeService,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -588,7 +591,7 @@ func (j *Job) GetJob(args *structs.JobSpecificRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for the job
out, err := state.JobByID(ws, args.JobID)
out, err := state.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -627,7 +630,7 @@ func (j *Job) GetJobVersions(args *structs.JobVersionsRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Look for the job
out, err := state.JobVersionsByID(ws, args.JobID)
out, err := state.JobVersionsByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -681,9 +684,9 @@ func (j *Job) List(args *structs.JobListRequest,
var err error
var iter memdb.ResultIterator
if prefix := args.QueryOptions.Prefix; prefix != "" {
iter, err = state.JobsByIDPrefix(ws, prefix)
iter, err = state.JobsByIDPrefix(ws, args.RequestNamespace(), prefix)
} else {
iter, err = state.Jobs(ws)
iter, err = state.JobsByNamespace(ws, args.RequestNamespace())
}
if err != nil {
return err
@ -696,7 +699,7 @@ func (j *Job) List(args *structs.JobListRequest,
break
}
job := raw.(*structs.Job)
summary, err := state.JobSummaryByID(ws, job.ID)
summary, err := state.JobSummaryByID(ws, args.RequestNamespace(), job.ID)
if err != nil {
return fmt.Errorf("unable to look up summary for job: %v", job.ID)
}
@ -732,7 +735,7 @@ func (j *Job) Allocations(args *structs.JobSpecificRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture the allocations
allocs, err := state.AllocsByJob(ws, args.JobID, args.AllAllocs)
allocs, err := state.AllocsByJob(ws, args.RequestNamespace(), args.JobID, args.AllAllocs)
if err != nil {
return err
}
@ -775,7 +778,7 @@ func (j *Job) Evaluations(args *structs.JobSpecificRequest,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture the evals
var err error
reply.Evaluations, err = state.EvalsByJob(ws, args.JobID)
reply.Evaluations, err = state.EvalsByJob(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -809,7 +812,7 @@ func (j *Job) Deployments(args *structs.JobSpecificRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture the deployments
deploys, err := state.DeploymentsByJobID(ws, args.JobID)
deploys, err := state.DeploymentsByJobID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -844,7 +847,7 @@ func (j *Job) LatestDeployment(args *structs.JobSpecificRequest,
queryMeta: &reply.QueryMeta,
run: func(ws memdb.WatchSet, state *state.StateStore) error {
// Capture the deployments
deploys, err := state.DeploymentsByJobID(ws, args.JobID)
deploys, err := state.DeploymentsByJobID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -906,7 +909,7 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse)
// Get the original job
ws := memdb.NewWatchSet()
oldJob, err := snap.JobByID(ws, args.Job.ID)
oldJob, err := snap.JobByID(ws, args.RequestNamespace(), args.Job.ID)
if err != nil {
return err
}
@ -932,6 +935,7 @@ func (j *Job) Plan(args *structs.JobPlanRequest, reply *structs.JobPlanResponse)
// Create an eval and mark it as requiring annotations and insert that as well
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: args.RequestNamespace(),
Priority: args.Job.Priority,
Type: args.Job.Type,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1100,7 +1104,7 @@ func (j *Job) Dispatch(args *structs.JobDispatchRequest, reply *structs.JobDispa
return err
}
ws := memdb.NewWatchSet()
parameterizedJob, err := snap.JobByID(ws, args.JobID)
parameterizedJob, err := snap.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -1161,6 +1165,7 @@ func (j *Job) Dispatch(args *structs.JobDispatchRequest, reply *structs.JobDispa
// Create a new evaluation
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: args.RequestNamespace(),
Priority: dispatchJob.Priority,
Type: dispatchJob.Type,
TriggeredBy: structs.EvalTriggerJobRegister,
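Every job RPC above resolves its namespace through args.RequestNamespace(). The structs change itself is not shown in this hunk, but the helper presumably falls back to the default namespace when a request carries none, which is what keeps pre-namespace clients working. A stand-in sketch of that assumed behavior:

```go
package main

import "fmt"

const DefaultNamespace = "default"

// writeRequest is a stand-in for structs.WriteRequest; RequestNamespace is
// assumed to fall back to the default namespace when none was supplied.
type writeRequest struct {
	Region    string
	Namespace string
}

func (w writeRequest) RequestNamespace() string {
	if w.Namespace == "" {
		return DefaultNamespace
	}
	return w.Namespace
}

func main() {
	legacy := writeRequest{Region: "global"}
	scoped := writeRequest{Region: "global", Namespace: "dev"}
	fmt.Println(legacy.RequestNamespace()) // default
	fmt.Println(scoped.RequestNamespace()) // dev
}
```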

View File

@ -30,7 +30,10 @@ func TestJobEndpoint_Register(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -45,7 +48,7 @@ func TestJobEndpoint_Register(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -127,7 +130,7 @@ func TestJobEndpoint_Register_ACL(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -151,7 +154,10 @@ func TestJobEndpoint_Register_InvalidDriverConfig(t *testing.T) {
job.TaskGroups[0].Tasks[0].Config["foo"] = 1
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -181,7 +187,10 @@ func TestJobEndpoint_Register_Payload(t *testing.T) {
job.Payload = []byte{0x1}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -209,7 +218,10 @@ func TestJobEndpoint_Register_Existing(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -238,7 +250,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -297,7 +309,7 @@ func TestJobEndpoint_Register_Existing(t *testing.T) {
// the same job
state = s1.fsm.State()
ws = memdb.NewWatchSet()
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -322,7 +334,10 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) {
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -337,7 +352,7 @@ func TestJobEndpoint_Register_Periodic(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -373,7 +388,10 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -388,7 +406,7 @@ func TestJobEndpoint_Register_ParameterizedJob(t *testing.T) {
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -418,7 +436,10 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
Job: job,
EnforceIndex: true,
JobModifyIndex: 100, // Not registered yet so not possible
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -433,7 +454,10 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
Job: job,
EnforceIndex: true,
JobModifyIndex: 0,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -449,7 +473,7 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -465,7 +489,10 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
Job: job,
EnforceIndex: true,
JobModifyIndex: 0,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -479,7 +506,10 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
Job: job,
EnforceIndex: true,
JobModifyIndex: curIndex - 1,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -494,7 +524,10 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
Job: job,
EnforceIndex: true,
JobModifyIndex: curIndex,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -505,7 +538,7 @@ func TestJobEndpoint_Register_EnforceIndex(t *testing.T) {
t.Fatalf("bad index: %d", resp.Index)
}
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -536,7 +569,10 @@ func TestJobEndpoint_Register_Vault_Disabled(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -572,7 +608,10 @@ func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -585,7 +624,7 @@ func TestJobEndpoint_Register_Vault_AllowUnauthenticated(t *testing.T) {
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -623,7 +662,10 @@ func TestJobEndpoint_Register_Vault_NoToken(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -682,7 +724,10 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -711,7 +756,7 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -745,7 +790,10 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
}
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -754,7 +802,7 @@ func TestJobEndpoint_Register_Vault_Policies(t *testing.T) {
}
// Check for the job in the FSM
out, err = state.JobByID(ws, job2.ID)
out, err = state.JobByID(ws, job2.Namespace, job2.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -783,7 +831,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
job.Priority = 100
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -800,7 +851,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
job2.Priority = 1
req = &structs.JobRegisterRequest{
Job: job2,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -816,7 +870,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
JobID: job.ID,
JobVersion: 0,
EnforcePriorVersion: helper.Uint64ToPtr(10),
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -829,7 +886,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 1,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -843,7 +903,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
JobID: job.ID,
JobVersion: 0,
EnforcePriorVersion: helper.Uint64ToPtr(1),
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -865,7 +928,10 @@ func TestJobEndpoint_Revert(t *testing.T) {
revertReq = &structs.JobRevertRequest{
JobID: job.ID,
JobVersion: 0,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -886,7 +952,7 @@ func TestJobEndpoint_Revert(t *testing.T) {
// created
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -911,7 +977,7 @@ func TestJobEndpoint_Revert(t *testing.T) {
t.Fatalf("job id mis-match")
}
versions, err := state.JobVersionsByID(ws, job.ID)
versions, err := state.JobVersionsByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -933,7 +999,10 @@ func TestJobEndpoint_Stable(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -950,7 +1019,10 @@ func TestJobEndpoint_Stable(t *testing.T) {
JobID: job.ID,
JobVersion: 0,
Stable: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -965,7 +1037,7 @@ func TestJobEndpoint_Stable(t *testing.T) {
// Check that the job is marked stable
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -990,7 +1062,10 @@ func TestJobEndpoint_Evaluate(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1005,7 +1080,10 @@ func TestJobEndpoint_Evaluate(t *testing.T) {
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1063,7 +1141,10 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
job := mock.PeriodicJob()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1078,7 +1159,10 @@ func TestJobEndpoint_Evaluate_Periodic(t *testing.T) {
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1102,7 +1186,10 @@ func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1117,7 +1204,10 @@ func TestJobEndpoint_Evaluate_ParameterizedJob(t *testing.T) {
// Force a re-evaluation
reEval := &structs.JobEvaluateRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1139,7 +1229,10 @@ func TestJobEndpoint_Deregister(t *testing.T) {
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1152,7 +1245,10 @@ func TestJobEndpoint_Deregister(t *testing.T) {
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: false,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
@ -1165,7 +1261,7 @@ func TestJobEndpoint_Deregister(t *testing.T) {
// Check for the job in the FSM
ws := memdb.NewWatchSet()
state := s1.fsm.State()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1211,7 +1307,10 @@ func TestJobEndpoint_Deregister(t *testing.T) {
dereg2 := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp3 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg2, &resp3); err != nil {
@ -1222,7 +1321,7 @@ func TestJobEndpoint_Deregister(t *testing.T) {
}
// Check for the job in the FSM
out, err = state.JobByID(ws, job.ID)
out, err = state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1275,7 +1374,10 @@ func TestJobEndpoint_Deregister_NonExistent(t *testing.T) {
jobID := "foo"
dereg := &structs.JobDeregisterRequest{
JobID: jobID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
@ -1332,7 +1434,10 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
job := mock.PeriodicJob()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1345,7 +1450,10 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
@ -1358,7 +1466,7 @@ func TestJobEndpoint_Deregister_Periodic(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1386,7 +1494,10 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
job.ParameterizedJob = &structs.ParameterizedJobConfig{}
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1399,7 +1510,10 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
dereg := &structs.JobDeregisterRequest{
JobID: job.ID,
Purge: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobDeregisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Deregister", dereg, &resp2); err != nil {
@ -1412,7 +1526,7 @@ func TestJobEndpoint_Deregister_ParameterizedJob(t *testing.T) {
// Check for the node in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -1436,7 +1550,10 @@ func TestJobEndpoint_GetJob(t *testing.T) {
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1451,7 +1568,10 @@ func TestJobEndpoint_GetJob(t *testing.T) {
// Lookup the job
get := &structs.JobSpecificRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.SingleJobResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJob", get, &resp2); err != nil {
@ -1526,6 +1646,7 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
JobID: job2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job2.Namespace,
MinQueryIndex: 150,
},
}
@ -1547,7 +1668,7 @@ func TestJobEndpoint_GetJob_Blocking(t *testing.T) {
// Job delete fires watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(300, job2.ID); err != nil {
if err := state.DeleteJob(300, job2.Namespace, job2.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
@ -1583,7 +1704,10 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) {
job.Priority = 88
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1601,7 +1725,10 @@ func TestJobEndpoint_GetJobVersions(t *testing.T) {
// Lookup the job
get := &structs.JobVersionsRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var versionsResp structs.JobVersionsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
@ -1649,7 +1776,10 @@ func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
job.Priority = 88
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1674,7 +1804,10 @@ func TestJobEndpoint_GetJobVersions_Diff(t *testing.T) {
get := &structs.JobVersionsRequest{
JobID: job.ID,
Diffs: true,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var versionsResp structs.JobVersionsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.GetJobVersions", get, &versionsResp); err != nil {
@ -1760,6 +1893,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
JobID: job2.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job2.Namespace,
MinQueryIndex: 150,
},
}
@ -1790,6 +1924,7 @@ func TestJobEndpoint_GetJobVersions_Blocking(t *testing.T) {
JobID: job3.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job3.Namespace,
MinQueryIndex: 250,
},
}
@ -1824,7 +1959,10 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
job := mock.Job()
reg := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -1839,7 +1977,10 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
// Lookup the job summary
get := &structs.JobSummaryRequest{
JobID: job.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobSummaryResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Summary", get, &resp2); err != nil {
@ -1851,6 +1992,7 @@ func TestJobEndpoint_GetJobSummary(t *testing.T) {
expectedJobSummary := structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{},
},
@ -1885,6 +2027,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
JobID: job1.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job1.Namespace,
MinQueryIndex: 50,
},
}
@ -1910,6 +2053,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
JobID: job1.ID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job1.Namespace,
MinQueryIndex: 199,
},
}
@ -1932,7 +2076,7 @@ func TestJobEndpoint_GetJobSummary_Blocking(t *testing.T) {
// Job delete fires watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(300, job1.ID); err != nil {
if err := state.DeleteJob(300, job1.Namespace, job1.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
@ -1973,7 +2117,10 @@ func TestJobEndpoint_ListJobs(t *testing.T) {
// Lookup the jobs
get := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp2 structs.JobListResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp2); err != nil {
@ -1992,7 +2139,11 @@ func TestJobEndpoint_ListJobs(t *testing.T) {
// Lookup the jobs by prefix
get = &structs.JobListRequest{
QueryOptions: structs.QueryOptions{Region: "global", Prefix: resp2.Jobs[0].ID[:4]},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
Prefix: resp2.Jobs[0].ID[:4],
},
}
var resp3 structs.JobListResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.List", get, &resp3); err != nil {
@ -2031,6 +2182,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
req := &structs.JobListRequest{
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
MinQueryIndex: 50,
},
}
@ -2052,7 +2204,7 @@ func TestJobEndpoint_ListJobs_Blocking(t *testing.T) {
// Job deletion triggers watches
time.AfterFunc(100*time.Millisecond, func() {
if err := state.DeleteJob(200, job.ID); err != nil {
if err := state.DeleteJob(200, job.Namespace, job.ID); err != nil {
t.Fatalf("err: %v", err)
}
})
@ -2098,7 +2250,10 @@ func TestJobEndpoint_Allocations(t *testing.T) {
// Lookup the jobs
get := &structs.JobSpecificRequest{
JobID: alloc1.JobID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc1.Job.Namespace,
},
}
var resp2 structs.JobAllocationsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Allocations", get, &resp2); err != nil {
@ -2149,6 +2304,7 @@ func TestJobEndpoint_Allocations_Blocking(t *testing.T) {
JobID: "job1",
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc1.Job.Namespace,
MinQueryIndex: 150,
},
}
@ -2190,7 +2346,10 @@ func TestJobEndpoint_Evaluations(t *testing.T) {
// Lookup the jobs
get := &structs.JobSpecificRequest{
JobID: eval1.JobID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp2 structs.JobEvaluationsResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Evaluations", get, &resp2); err != nil {
@ -2239,6 +2398,7 @@ func TestJobEndpoint_Evaluations_Blocking(t *testing.T) {
JobID: "job1",
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
MinQueryIndex: 150,
},
}
@ -2281,7 +2441,10 @@ func TestJobEndpoint_Deployments(t *testing.T) {
// Lookup the jobs
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.DeploymentListResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.Deployments", get, &resp), "RPC")
@ -2320,6 +2483,7 @@ func TestJobEndpoint_Deployments_Blocking(t *testing.T) {
JobID: d2.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: d2.Namespace,
MinQueryIndex: 150,
},
}
@ -2358,7 +2522,10 @@ func TestJobEndpoint_LatestDeployment(t *testing.T) {
// Lookup the jobs
get := &structs.JobSpecificRequest{
JobID: j.ID,
QueryOptions: structs.QueryOptions{Region: "global"},
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: j.Namespace,
},
}
var resp structs.SingleDeploymentResponse
assert.Nil(msgpackrpc.CallWithCodec(codec, "Job.LatestDeployment", get, &resp), "RPC")
@ -2398,6 +2565,7 @@ func TestJobEndpoint_LatestDeployment_Blocking(t *testing.T) {
JobID: d2.JobID,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: d2.Namespace,
MinQueryIndex: 150,
},
}
@ -2425,7 +2593,10 @@ func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2441,7 +2612,10 @@ func TestJobEndpoint_Plan_WithDiff(t *testing.T) {
planReq := &structs.JobPlanRequest{
Job: job,
Diff: true,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2478,7 +2652,10 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
job := mock.Job()
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2494,7 +2671,10 @@ func TestJobEndpoint_Plan_NoDiff(t *testing.T) {
planReq := &structs.JobPlanRequest{
Job: job,
Diff: false,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2550,7 +2730,10 @@ func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2562,7 +2745,7 @@ func TestJobEndpoint_ImplicitConstraints_Vault(t *testing.T) {
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -2607,7 +2790,10 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
}
req := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -2619,7 +2805,7 @@ func TestJobEndpoint_ImplicitConstraints_Signals(t *testing.T) {
// Check for the job in the FSM
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, job.ID)
out, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
t.Fatalf("err: %v", err)
}
@ -2902,7 +3088,10 @@ func TestJobEndpoint_Dispatch(t *testing.T) {
// Create the register request
regReq := &structs.JobRegisterRequest{
Job: tc.parameterizedJob,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
},
}
// Fetch the response
@ -2913,7 +3102,10 @@ func TestJobEndpoint_Dispatch(t *testing.T) {
// Now try to dispatch
tc.dispatchReq.JobID = tc.parameterizedJob.ID
tc.dispatchReq.WriteRequest = structs.WriteRequest{Region: "global"}
tc.dispatchReq.WriteRequest = structs.WriteRequest{
Region: "global",
Namespace: tc.parameterizedJob.Namespace,
}
var dispatchResp structs.JobDispatchResponse
dispatchErr := msgpackrpc.CallWithCodec(codec, "Job.Dispatch", tc.dispatchReq, &dispatchResp)
@ -2941,7 +3133,7 @@ func TestJobEndpoint_Dispatch(t *testing.T) {
state := s1.fsm.State()
ws := memdb.NewWatchSet()
out, err := state.JobByID(ws, dispatchResp.DispatchedJobID)
out, err := state.JobByID(ws, tc.parameterizedJob.Namespace, dispatchResp.DispatchedJobID)
if err != nil {
t.Fatalf("err: %v", err)
}
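The hunks in this file all make the same two-part change: every RPC request now carries the job's namespace in its WriteRequest or QueryOptions, and every state-store lookup takes the namespace ahead of the job ID. A minimal sketch of that calling convention, assembled only from calls already shown in the hunks above (not a new test added by this commit):

job := mock.Job()
req := &structs.JobRegisterRequest{
	Job: job,
	WriteRequest: structs.WriteRequest{
		Region:    "global",
		Namespace: job.Namespace,
	},
}
var resp structs.JobRegisterResponse
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", req, &resp); err != nil {
	t.Fatalf("err: %v", err)
}

// Lookups against the FSM now need the namespace as well.
ws := memdb.NewWatchSet()
out, err := s1.fsm.State().JobByID(ws, job.Namespace, job.ID)
if err != nil || out == nil {
	t.Fatalf("expected job in state store, got %v (err: %v)", out, err)
}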

View File

@ -203,6 +203,12 @@ func (s *Server) establishLeadership(stopCh chan struct{}) error {
go s.replicateACLPolicies(stopCh)
go s.replicateACLTokens(stopCh)
}
// Setup any enterprise systems required.
if err := s.establishEnterpriseLeadership(); err != nil {
return err
}
return nil
}
@ -313,7 +319,7 @@ func (s *Server) restorePeriodicDispatcher() error {
// If the periodic job has never been launched before, launch will hold
// the time the periodic job was added. Otherwise it has the last launch
// time of the periodic job.
launch, err := s.fsm.State().PeriodicLaunchByID(ws, job.ID)
launch, err := s.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
if err != nil || launch == nil {
return fmt.Errorf("failed to get periodic launch time: %v", err)
}
@ -328,7 +334,7 @@ func (s *Server) restorePeriodicDispatcher() error {
continue
}
if _, err := s.periodicDispatcher.ForceRun(job.ID); err != nil {
if _, err := s.periodicDispatcher.ForceRun(job.Namespace, job.ID); err != nil {
msg := fmt.Sprintf("force run of periodic job %q failed: %v", job.ID, err)
s.logger.Printf("[ERR] nomad.periodic: %s", msg)
return errors.New(msg)
@ -386,6 +392,7 @@ func (s *Server) schedulePeriodic(stopCh chan struct{}) {
func (s *Server) coreJobEval(job string, modifyIndex uint64) *structs.Evaluation {
return &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: "-",
Priority: structs.CoreJobPriority,
Type: structs.JobTypeCore,
TriggeredBy: structs.EvalTriggerScheduled,
@ -513,6 +520,11 @@ func (s *Server) revokeLeadership() error {
return err
}
// Disable any enterprise systems required.
if err := s.revokeEnterpriseLeadership(); err != nil {
return err
}
// Clear the heartbeat timers on either shutdown or step down,
// since we are no longer responsible for TTL expirations.
if err := s.clearAllHeartbeatTimers(); err != nil {

13
nomad/leader_oss.go Normal file
View File

@ -0,0 +1,13 @@
// +build !pro,!ent
package nomad
// establishEnterpriseLeadership is a no-op on OSS.
func (s *Server) establishEnterpriseLeadership() error {
return nil
}
// revokeEnterpriseLeadership is a no-op on OSS.
func (s *Server) revokeEnterpriseLeadership() error {
return nil
}
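These two hooks compile only when neither the pro nor the ent build tag is set, which is what the +build !pro,!ent constraint above expresses. A hypothetical counterpart file (not part of this commit; the ent tag, file name, and bodies are assumptions) shows how an enterprise build would supply the other half of the pair so that exactly one implementation of each hook ends up in the binary:

// Hypothetical enterprise-side pairing, e.g. leader_ent.go (illustrative only).

// +build ent

package nomad

// establishEnterpriseLeadership would start enterprise-only subsystems here.
func (s *Server) establishEnterpriseLeadership() error {
	return nil
}

// revokeEnterpriseLeadership would stop them again on leadership loss.
func (s *Server) revokeEnterpriseLeadership() error {
	return nil
}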

View File

@ -339,6 +339,9 @@ func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) {
for _, job := range []*structs.Job{nonPeriodic, periodic, parameterizedPeriodic} {
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
_, _, err := leader.raftApply(structs.JobRegisterRequestType, req)
if err != nil {
@ -364,15 +367,28 @@ func TestLeader_PeriodicDispatcher_Restore_Adds(t *testing.T) {
t.Fatalf("should have leader")
})
tuplePeriodic := structs.NamespacedID{
ID: periodic.ID,
Namespace: periodic.Namespace,
}
tupleNonPeriodic := structs.NamespacedID{
ID: nonPeriodic.ID,
Namespace: nonPeriodic.Namespace,
}
tupleParameterized := structs.NamespacedID{
ID: parameterizedPeriodic.ID,
Namespace: parameterizedPeriodic.Namespace,
}
// Check that the new leader is tracking the periodic job only
testutil.WaitForResult(func() (bool, error) {
if _, tracked := leader.periodicDispatcher.tracked[periodic.ID]; !tracked {
if _, tracked := leader.periodicDispatcher.tracked[tuplePeriodic]; !tracked {
return false, fmt.Errorf("periodic job not tracked")
}
if _, tracked := leader.periodicDispatcher.tracked[nonPeriodic.ID]; tracked {
if _, tracked := leader.periodicDispatcher.tracked[tupleNonPeriodic]; tracked {
return false, fmt.Errorf("non periodic job tracked")
}
if _, tracked := leader.periodicDispatcher.tracked[parameterizedPeriodic.ID]; tracked {
if _, tracked := leader.periodicDispatcher.tracked[tupleParameterized]; tracked {
return false, fmt.Errorf("parameterized periodic job tracked")
}
return true, nil
@ -393,6 +409,9 @@ func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) {
job := testPeriodicJob(launch)
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
_, _, err := s1.raftApply(structs.JobRegisterRequestType, req)
if err != nil {
@ -414,13 +433,17 @@ func TestLeader_PeriodicDispatcher_Restore_NoEvals(t *testing.T) {
s1.restorePeriodicDispatcher()
// Ensure the job is tracked.
if _, tracked := s1.periodicDispatcher.tracked[job.ID]; !tracked {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, tracked := s1.periodicDispatcher.tracked[tuple]; !tracked {
t.Fatalf("periodic job not restored")
}
// Check that an eval was made.
ws := memdb.NewWatchSet()
last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.ID)
last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
if err != nil || last == nil {
t.Fatalf("failed to get periodic launch time: %v", err)
}
@ -445,6 +468,9 @@ func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) {
job := testPeriodicJob(past, now, future)
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
_, _, err := s1.raftApply(structs.JobRegisterRequestType, req)
if err != nil {
@ -465,13 +491,17 @@ func TestLeader_PeriodicDispatcher_Restore_Evals(t *testing.T) {
s1.restorePeriodicDispatcher()
// Ensure the job is tracked.
if _, tracked := s1.periodicDispatcher.tracked[job.ID]; !tracked {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, tracked := s1.periodicDispatcher.tracked[tuple]; !tracked {
t.Fatalf("periodic job not restored")
}
// Check that an eval was made.
ws := memdb.NewWatchSet()
last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.ID)
last, err := s1.fsm.State().PeriodicLaunchByID(ws, job.Namespace, job.ID)
if err != nil || last == nil {
t.Fatalf("failed to get periodic launch time: %v", err)
}
@ -535,7 +565,7 @@ func TestLeader_ReapFailedEval(t *testing.T) {
}
// See if there is a followup
evals, err := state.EvalsByJob(ws, eval.JobID)
evals, err := state.EvalsByJob(ws, eval.Namespace, eval.JobID)
if err != nil {
return false, err
}

View File

@ -65,6 +65,7 @@ func Job() *structs.Job {
Region: "global",
ID: structs.GenerateUUID(),
Name: "my-job",
Namespace: structs.DefaultNamespace,
Type: structs.JobTypeService,
Priority: 50,
AllAtOnce: false,
@ -159,6 +160,7 @@ func Job() *structs.Job {
func SystemJob() *structs.Job {
job := &structs.Job{
Region: "global",
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Name: "my-job",
Type: structs.JobTypeSystem,
@ -232,6 +234,7 @@ func PeriodicJob() *structs.Job {
func Eval() *structs.Evaluation {
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: structs.DefaultNamespace,
Priority: 50,
Type: structs.JobTypeService,
JobID: structs.GenerateUUID(),
@ -243,6 +246,7 @@ func Eval() *structs.Evaluation {
func JobSummary(jobID string) *structs.JobSummary {
js := &structs.JobSummary{
JobID: jobID,
Namespace: structs.DefaultNamespace,
Summary: map[string]structs.TaskGroupSummary{
"web": {
Queued: 0,
@ -258,6 +262,7 @@ func Alloc() *structs.Allocation {
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: "12345678-abcd-efab-cdef-123456789abc",
Namespace: structs.DefaultNamespace,
TaskGroup: "web",
Resources: &structs.Resources{
CPU: 500,
@ -313,6 +318,7 @@ func Deployment() *structs.Deployment {
return &structs.Deployment{
ID: structs.GenerateUUID(),
JobID: structs.GenerateUUID(),
Namespace: structs.DefaultNamespace,
JobVersion: 2,
JobModifyIndex: 20,
JobCreateIndex: 18,

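All of the mocks above now stamp structs.DefaultNamespace onto jobs, evaluations, summaries, allocations, and deployments. A short usage sketch (not from the commit) of how a test gets a second namespace when it needs one, mirroring the override used in periodic_test.go further down:

job := mock.Job()          // job.Namespace == structs.DefaultNamespace
job2 := mock.PeriodicJob()
job2.Namespace = "test"    // any non-default namespace; "test" matches the tests below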
View File

@ -844,6 +844,7 @@ func (n *Node) createNodeEvals(nodeID string, nodeIndex uint64) ([]string, uint6
// Create a new eval
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: alloc.Namespace,
Priority: alloc.Job.Priority,
Type: alloc.Job.Type,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -867,6 +868,7 @@ func (n *Node) createNodeEvals(nodeID string, nodeIndex uint64) ([]string, uint6
// Create a new eval
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: job.Namespace,
Priority: job.Priority,
Type: job.Type,
TriggeredBy: structs.EvalTriggerNodeUpdate,

View File

@ -683,7 +683,10 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
job.TaskGroups[0].Count = 1
jobReq := &structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp); err != nil {
t.Fatalf("err: %v", err)
@ -696,7 +699,10 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
job1.Type = structs.JobTypeSystem
jobReq1 := &structs.JobRegisterRequest{
Job: job1,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job1.Namespace,
},
}
if err := msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1); err != nil {
t.Fatalf("err: %v", err)
@ -705,11 +711,11 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
// Wait for the scheduler to create an allocation
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
allocs, err := s1.fsm.state.AllocsByJob(ws, job.ID, true)
allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
if err != nil {
return false, err
}
allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.ID, true)
allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
if err != nil {
return false, err
}
@ -742,12 +748,13 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
// Ensure that the allocation has transitioned to lost
testutil.WaitForResult(func() (bool, error) {
ws := memdb.NewWatchSet()
summary, err := s1.fsm.state.JobSummaryByID(ws, job.ID)
summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
if err != nil {
return false, err
}
expectedSummary := &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Queued: 1,
@ -762,12 +769,13 @@ func TestClientEndpoint_Drain_Down(t *testing.T) {
return false, fmt.Errorf("expected: %#v, actual: %#v", expectedSummary, summary)
}
summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.ID)
summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
if err != nil {
return false, err
}
expectedSummary1 := &structs.JobSummary{
JobID: job1.ID,
Namespace: job1.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Lost: 1,

View File

@ -21,7 +21,7 @@ type PeriodicDispatch struct {
dispatcher JobEvalDispatcher
enabled bool
tracked map[string]*structs.Job
tracked map[structs.NamespacedID]*structs.Job
heap *periodicHeap
updateCh chan struct{}
@ -46,7 +46,12 @@ type JobEvalDispatcher interface {
func (s *Server) DispatchJob(job *structs.Job) (*structs.Evaluation, error) {
// Commit this update via Raft
job.SetSubmitTime()
req := structs.JobRegisterRequest{Job: job}
req := structs.JobRegisterRequest{
Job: job,
WriteRequest: structs.WriteRequest{
Namespace: job.Namespace,
},
}
_, index, err := s.raftApply(structs.JobRegisterRequestType, req)
if err != nil {
return nil, err
@ -55,6 +60,7 @@ func (s *Server) DispatchJob(job *structs.Job) (*structs.Evaluation, error) {
// Create a new evaluation
eval := &structs.Evaluation{
ID: structs.GenerateUUID(),
Namespace: job.Namespace,
Priority: job.Priority,
Type: job.Type,
TriggeredBy: structs.EvalTriggerPeriodicJob,
@ -89,7 +95,7 @@ func (s *Server) RunningChildren(job *structs.Job) (bool, error) {
ws := memdb.NewWatchSet()
prefix := fmt.Sprintf("%s%s", job.ID, structs.PeriodicLaunchSuffix)
iter, err := state.JobsByIDPrefix(ws, prefix)
iter, err := state.JobsByIDPrefix(ws, job.Namespace, prefix)
if err != nil {
return false, err
}
@ -104,7 +110,7 @@ func (s *Server) RunningChildren(job *structs.Job) (bool, error) {
}
// Get the child's evaluations.
evals, err := state.EvalsByJob(ws, child.ID)
evals, err := state.EvalsByJob(ws, child.Namespace, child.ID)
if err != nil {
return false, err
}
@ -137,7 +143,7 @@ func (s *Server) RunningChildren(job *structs.Job) (bool, error) {
func NewPeriodicDispatch(logger *log.Logger, dispatcher JobEvalDispatcher) *PeriodicDispatch {
return &PeriodicDispatch{
dispatcher: dispatcher,
tracked: make(map[string]*structs.Job),
tracked: make(map[structs.NamespacedID]*structs.Job),
heap: NewPeriodicHeap(),
updateCh: make(chan struct{}, 1),
logger: logger,
@ -192,10 +198,15 @@ func (p *PeriodicDispatch) Add(job *structs.Job) error {
// If we were tracking a job and it has been disabled or made non-periodic remove it.
disabled := !job.IsPeriodic() || !job.Periodic.Enabled
_, tracked := p.tracked[job.ID]
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
_, tracked := p.tracked[tuple]
if disabled {
if tracked {
p.removeLocked(job.ID)
p.removeLocked(tuple)
}
// If the job is disabled and we aren't tracking it, do nothing.
@ -209,18 +220,18 @@ func (p *PeriodicDispatch) Add(job *structs.Job) error {
}
// Add or update the job.
p.tracked[job.ID] = job
p.tracked[tuple] = job
next := job.Periodic.Next(time.Now().In(job.Periodic.GetLocation()))
if tracked {
if err := p.heap.Update(job, next); err != nil {
return fmt.Errorf("failed to update job %v launch time: %v", job.ID, err)
return fmt.Errorf("failed to update job %q (%s) launch time: %v", job.ID, job.Namespace, err)
}
p.logger.Printf("[DEBUG] nomad.periodic: updated periodic job %q", job.ID)
p.logger.Printf("[DEBUG] nomad.periodic: updated periodic job %q (%s)", job.ID, job.Namespace)
} else {
if err := p.heap.Push(job, next); err != nil {
return fmt.Errorf("failed to add job %v: %v", job.ID, err)
}
p.logger.Printf("[DEBUG] nomad.periodic: registered periodic job %q", job.ID)
p.logger.Printf("[DEBUG] nomad.periodic: registered periodic job %q (%s)", job.ID, job.Namespace)
}
// Signal an update.
@ -234,15 +245,18 @@ func (p *PeriodicDispatch) Add(job *structs.Job) error {
// Remove stops tracking the passed job. If the job is not tracked, it is a
// no-op.
func (p *PeriodicDispatch) Remove(jobID string) error {
func (p *PeriodicDispatch) Remove(namespace, jobID string) error {
p.l.Lock()
defer p.l.Unlock()
return p.removeLocked(jobID)
return p.removeLocked(structs.NamespacedID{
ID: jobID,
Namespace: namespace,
})
}
// Remove stops tracking the passed job. If the job is not tracked, it is a
// no-op. It assumes this is called while a lock is held.
func (p *PeriodicDispatch) removeLocked(jobID string) error {
func (p *PeriodicDispatch) removeLocked(jobID structs.NamespacedID) error {
// Do nothing if not enabled
if !p.enabled {
return nil
@ -255,7 +269,7 @@ func (p *PeriodicDispatch) removeLocked(jobID string) error {
delete(p.tracked, jobID)
if err := p.heap.Remove(job); err != nil {
return fmt.Errorf("failed to remove tracked job %v: %v", jobID, err)
return fmt.Errorf("failed to remove tracked job %q (%s): %v", jobID.ID, jobID.Namespace, err)
}
// Signal an update.
@ -264,13 +278,13 @@ func (p *PeriodicDispatch) removeLocked(jobID string) error {
default:
}
p.logger.Printf("[DEBUG] nomad.periodic: deregistered periodic job %q", jobID)
p.logger.Printf("[DEBUG] nomad.periodic: deregistered periodic job %q (%s)", jobID.ID, jobID.Namespace)
return nil
}
// ForceRun causes the periodic job to be evaluated immediately and returns the
// subsequent eval.
func (p *PeriodicDispatch) ForceRun(jobID string) (*structs.Evaluation, error) {
func (p *PeriodicDispatch) ForceRun(namespace, jobID string) (*structs.Evaluation, error) {
p.l.Lock()
// Do nothing if not enabled
@ -279,10 +293,14 @@ func (p *PeriodicDispatch) ForceRun(jobID string) (*structs.Evaluation, error) {
return nil, fmt.Errorf("periodic dispatch disabled")
}
job, tracked := p.tracked[jobID]
tuple := structs.NamespacedID{
ID: jobID,
Namespace: namespace,
}
job, tracked := p.tracked[tuple]
if !tracked {
p.l.Unlock()
return nil, fmt.Errorf("can't force run non-tracked job %v", jobID)
return nil, fmt.Errorf("can't force run non-tracked job %q (%s)", jobID, namespace)
}
p.l.Unlock()
@ -307,7 +325,7 @@ func (p *PeriodicDispatch) run(ctx context.Context) {
} else {
launchDur := launch.Sub(time.Now().In(job.Periodic.GetLocation()))
launchCh = time.After(launchDur)
p.logger.Printf("[DEBUG] nomad.periodic: launching job %q in %s", job.ID, launchDur)
p.logger.Printf("[DEBUG] nomad.periodic: launching job %q (%s) in %s", job.ID, job.Namespace, launchDur)
}
select {
@ -328,7 +346,7 @@ func (p *PeriodicDispatch) dispatch(job *structs.Job, launchTime time.Time) {
nextLaunch := job.Periodic.Next(launchTime)
if err := p.heap.Update(job, nextLaunch); err != nil {
p.logger.Printf("[ERR] nomad.periodic: failed to update next launch of periodic job %q: %v", job.ID, err)
p.logger.Printf("[ERR] nomad.periodic: failed to update next launch of periodic job %q (%s): %v", job.ID, job.Namespace, err)
}
// If the job prohibits overlapping and there are running children, we skip
@ -337,7 +355,7 @@ func (p *PeriodicDispatch) dispatch(job *structs.Job, launchTime time.Time) {
running, err := p.dispatcher.RunningChildren(job)
if err != nil {
msg := fmt.Sprintf("[ERR] nomad.periodic: failed to determine if"+
" periodic job %q has running children: %v", job.ID, err)
" periodic job %q (%s) has running children: %v", job.ID, job.Namespace, err)
p.logger.Println(msg)
p.l.Unlock()
return
@ -345,14 +363,14 @@ func (p *PeriodicDispatch) dispatch(job *structs.Job, launchTime time.Time) {
if running {
msg := fmt.Sprintf("[DEBUG] nomad.periodic: skipping launch of"+
" periodic job %q because job prohibits overlap", job.ID)
" periodic job %q (%s) because job prohibits overlap", job.ID, job.Namespace)
p.logger.Println(msg)
p.l.Unlock()
return
}
}
p.logger.Printf("[DEBUG] nomad.periodic: launching job %v at %v", job.ID, launchTime)
p.logger.Printf("[DEBUG] nomad.periodic: launching job %q (%v) at %v", job.ID, job.Namespace, launchTime)
p.l.Unlock()
p.createEval(job, launchTime)
}
@ -386,7 +404,8 @@ func (p *PeriodicDispatch) createEval(periodicJob *structs.Job, time time.Time)
eval, err := p.dispatcher.DispatchJob(derived)
if err != nil {
p.logger.Printf("[ERR] nomad.periodic: failed to dispatch job %q: %v", periodicJob.ID, err)
p.logger.Printf("[ERR] nomad.periodic: failed to dispatch job %q (%s): %v",
periodicJob.ID, periodicJob.Namespace, err)
return nil, err
}
@ -402,11 +421,13 @@ func (p *PeriodicDispatch) deriveJob(periodicJob *structs.Job, time time.Time) (
defer func() {
if r := recover(); r != nil {
p.logger.Printf("[ERR] nomad.periodic: deriving job from"+
" periodic job %v failed; deregistering from periodic runner: %v",
periodicJob.ID, r)
p.Remove(periodicJob.ID)
" periodic job %q (%s) failed; deregistering from periodic runner: %v",
periodicJob.ID, periodicJob.Namespace, r)
p.Remove(periodicJob.Namespace, periodicJob.ID)
derived = nil
err = fmt.Errorf("Failed to create a copy of the periodic job %v: %v", periodicJob.ID, r)
err = fmt.Errorf("Failed to create a copy of the periodic job %q (%s): %v",
periodicJob.ID, periodicJob.Namespace, r)
}
}()
@ -445,14 +466,14 @@ func (p *PeriodicDispatch) LaunchTime(jobID string) (time.Time, error) {
// flush clears the state of the PeriodicDispatcher
func (p *PeriodicDispatch) flush() {
p.updateCh = make(chan struct{}, 1)
p.tracked = make(map[string]*structs.Job)
p.tracked = make(map[structs.NamespacedID]*structs.Job)
p.heap = NewPeriodicHeap()
p.stopFn = nil
}
// periodicHeap wraps a heap and gives operations other than Push/Pop.
type periodicHeap struct {
index map[string]*periodicJob
index map[structs.NamespacedID]*periodicJob
heap periodicHeapImp
}
@ -464,18 +485,22 @@ type periodicJob struct {
func NewPeriodicHeap() *periodicHeap {
return &periodicHeap{
index: make(map[string]*periodicJob),
index: make(map[structs.NamespacedID]*periodicJob),
heap: make(periodicHeapImp, 0),
}
}
func (p *periodicHeap) Push(job *structs.Job, next time.Time) error {
if _, ok := p.index[job.ID]; ok {
return fmt.Errorf("job %v already exists", job.ID)
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := p.index[tuple]; ok {
return fmt.Errorf("job %q (%s) already exists", job.ID, job.Namespace)
}
pJob := &periodicJob{job, next, 0}
p.index[job.ID] = pJob
p.index[tuple] = pJob
heap.Push(&p.heap, pJob)
return nil
}
@ -486,7 +511,11 @@ func (p *periodicHeap) Pop() *periodicJob {
}
pJob := heap.Pop(&p.heap).(*periodicJob)
delete(p.index, pJob.job.ID)
tuple := structs.NamespacedID{
ID: pJob.job.ID,
Namespace: pJob.job.Namespace,
}
delete(p.index, tuple)
return pJob
}
@ -499,12 +528,20 @@ func (p *periodicHeap) Peek() *periodicJob {
}
func (p *periodicHeap) Contains(job *structs.Job) bool {
_, ok := p.index[job.ID]
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
_, ok := p.index[tuple]
return ok
}
func (p *periodicHeap) Update(job *structs.Job, next time.Time) error {
if pJob, ok := p.index[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if pJob, ok := p.index[tuple]; ok {
// Need to update the job as well because its spec can change.
pJob.job = job
pJob.next = next
@ -512,17 +549,21 @@ func (p *periodicHeap) Update(job *structs.Job, next time.Time) error {
return nil
}
return fmt.Errorf("heap doesn't contain job %v", job.ID)
return fmt.Errorf("heap doesn't contain job %q (%s)", job.ID, job.Namespace)
}
func (p *periodicHeap) Remove(job *structs.Job) error {
if pJob, ok := p.index[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if pJob, ok := p.index[tuple]; ok {
heap.Remove(&p.heap, pJob.index)
delete(p.index, job.ID)
delete(p.index, tuple)
return nil
}
return fmt.Errorf("heap doesn't contain job %v", job.ID)
return fmt.Errorf("heap doesn't contain job %q (%s)", job.ID, job.Namespace)
}
func (p *periodicHeap) Length() int {

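The dispatcher, its heap, and the heap's index are now keyed by structs.NamespacedID rather than the bare job ID. Because the struct has only comparable string fields it can serve directly as a map key, so jobs that share an ID across namespaces no longer collide. A minimal sketch of that pattern, using only the types shown above (not code from the commit):

tracked := make(map[structs.NamespacedID]*structs.Job)

tuple := structs.NamespacedID{ID: job.ID, Namespace: job.Namespace}
tracked[tuple] = job

// A job with the same ID in another namespace hashes to a different key,
// which is the behaviour TestPeriodicDispatch_Run_SameID_Different_Namespace asserts.
other := structs.NamespacedID{ID: job.ID, Namespace: "test"}
_, collides := tracked[other]
// collides == false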
View File

@ -33,7 +33,7 @@ func (p *Periodic) Force(args *structs.PeriodicForceRequest, reply *structs.Peri
}
ws := memdb.NewWatchSet()
job, err := snap.JobByID(ws, args.JobID)
job, err := snap.JobByID(ws, args.RequestNamespace(), args.JobID)
if err != nil {
return err
}
@ -46,7 +46,7 @@ func (p *Periodic) Force(args *structs.PeriodicForceRequest, reply *structs.Peri
}
// Force run the job.
eval, err := p.srv.periodicDispatcher.ForceRun(job.ID)
eval, err := p.srv.periodicDispatcher.ForceRun(args.RequestNamespace(), job.ID)
if err != nil {
return fmt.Errorf("force launch for job %q failed: %v", job.ID, err)
}

View File

@ -31,7 +31,10 @@ func TestPeriodicEndpoint_Force(t *testing.T) {
// Force launch it.
req := &structs.PeriodicForceRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response
@ -76,7 +79,10 @@ func TestPeriodicEndpoint_Force_NonPeriodic(t *testing.T) {
// Force launch it.
req := &structs.PeriodicForceRequest{
JobID: job.ID,
WriteRequest: structs.WriteRequest{Region: "global"},
WriteRequest: structs.WriteRequest{
Region: "global",
Namespace: job.Namespace,
},
}
// Fetch the response

View File

@ -16,21 +16,26 @@ import (
"github.com/hashicorp/nomad/nomad/mock"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/testutil"
"github.com/stretchr/testify/assert"
)
type MockJobEvalDispatcher struct {
Jobs map[string]*structs.Job
Jobs map[structs.NamespacedID]*structs.Job
lock sync.Mutex
}
func NewMockJobEvalDispatcher() *MockJobEvalDispatcher {
return &MockJobEvalDispatcher{Jobs: make(map[string]*structs.Job)}
return &MockJobEvalDispatcher{Jobs: make(map[structs.NamespacedID]*structs.Job)}
}
func (m *MockJobEvalDispatcher) DispatchJob(job *structs.Job) (*structs.Evaluation, error) {
m.lock.Lock()
defer m.lock.Unlock()
m.Jobs[job.ID] = job
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
m.Jobs[tuple] = job
return nil, nil
}
@ -38,7 +43,7 @@ func (m *MockJobEvalDispatcher) RunningChildren(parent *structs.Job) (bool, erro
m.lock.Lock()
defer m.lock.Unlock()
for _, job := range m.Jobs {
if job.ParentID == parent.ID {
if job.ParentID == parent.ID && job.Namespace == parent.Namespace {
return true, nil
}
}
@ -46,12 +51,12 @@ func (m *MockJobEvalDispatcher) RunningChildren(parent *structs.Job) (bool, erro
}
// LaunchTimes returns the launch times of child jobs in sorted order.
func (m *MockJobEvalDispatcher) LaunchTimes(p *PeriodicDispatch, parentID string) ([]time.Time, error) {
func (m *MockJobEvalDispatcher) LaunchTimes(p *PeriodicDispatch, namespace, parentID string) ([]time.Time, error) {
m.lock.Lock()
defer m.lock.Unlock()
var launches []time.Time
for _, job := range m.Jobs {
if job.ParentID != parentID {
if job.ParentID != parentID || job.Namespace != namespace {
continue
}
@ -179,6 +184,22 @@ func TestPeriodicDispatch_Add_UpdateJob(t *testing.T) {
}
}
func TestPeriodicDispatch_Add_Remove_Namespaced(t *testing.T) {
assert := assert.New(t)
t.Parallel()
p, _ := testPeriodicDispatcher()
job := mock.PeriodicJob()
job2 := mock.PeriodicJob()
job2.Namespace = "test"
assert.Nil(p.Add(job))
assert.Nil(p.Add(job2))
assert.Len(p.Tracked(), 2)
assert.Nil(p.Remove(job2.Namespace, job2.ID))
assert.Len(p.Tracked(), 1)
assert.Equal(p.Tracked()[0], job)
}
func TestPeriodicDispatch_Add_RemoveJob(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
@ -224,14 +245,18 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
}
// Check that nothing is created.
if _, ok := m.Jobs[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := m.Jobs[tuple]; ok {
t.Fatalf("periodic dispatcher created eval at the wrong time")
}
time.Sleep(2 * time.Second)
// Check that job was launched correctly.
times, err := m.LaunchTimes(p, job.ID)
times, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q", job.ID)
}
@ -246,7 +271,7 @@ func TestPeriodicDispatch_Add_TriggersUpdate(t *testing.T) {
func TestPeriodicDispatch_Remove_Untracked(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
if err := p.Remove("foo"); err != nil {
if err := p.Remove("ns", "foo"); err != nil {
t.Fatalf("Remove failed %v; expected a no-op", err)
}
}
@ -265,7 +290,7 @@ func TestPeriodicDispatch_Remove_Tracked(t *testing.T) {
t.Fatalf("Add didn't track the job: %v", tracked)
}
if err := p.Remove(job.ID); err != nil {
if err := p.Remove(job.Namespace, job.ID); err != nil {
t.Fatalf("Remove failed %v", err)
}
@ -288,7 +313,7 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
}
// Remove the job.
if err := p.Remove(job.ID); err != nil {
if err := p.Remove(job.Namespace, job.ID); err != nil {
t.Fatalf("Add failed %v", err)
}
@ -296,7 +321,11 @@ func TestPeriodicDispatch_Remove_TriggersUpdate(t *testing.T) {
// Check that an eval wasn't created.
d := p.dispatcher.(*MockJobEvalDispatcher)
if _, ok := d.Jobs[job.ID]; ok {
tuple := structs.NamespacedID{
ID: job.ID,
Namespace: job.Namespace,
}
if _, ok := d.Jobs[tuple]; ok {
t.Fatalf("Remove didn't cancel creation of an eval")
}
}
@ -305,7 +334,7 @@ func TestPeriodicDispatch_ForceRun_Untracked(t *testing.T) {
t.Parallel()
p, _ := testPeriodicDispatcher()
if _, err := p.ForceRun("foo"); err == nil {
if _, err := p.ForceRun("ns", "foo"); err == nil {
t.Fatal("ForceRun of untracked job should fail")
}
}
@ -323,12 +352,12 @@ func TestPeriodicDispatch_ForceRun_Tracked(t *testing.T) {
}
// ForceRun the job
if _, err := p.ForceRun(job.ID); err != nil {
if _, err := p.ForceRun(job.Namespace, job.ID); err != nil {
t.Fatalf("ForceRun failed %v", err)
}
// Check that job was launched correctly.
launches, err := m.LaunchTimes(p, job.ID)
launches, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q: %v", job.ID, err)
}
@ -357,7 +386,7 @@ func TestPeriodicDispatch_Run_DisallowOverlaps(t *testing.T) {
time.Sleep(3 * time.Second)
// Check that only one job was launched.
times, err := m.LaunchTimes(p, job.ID)
times, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q", job.ID)
}
@ -386,7 +415,7 @@ func TestPeriodicDispatch_Run_Multiple(t *testing.T) {
time.Sleep(3 * time.Second)
// Check that job was launched correctly.
times, err := m.LaunchTimes(p, job.ID)
times, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q", job.ID)
}
@ -418,11 +447,55 @@ func TestPeriodicDispatch_Run_SameTime(t *testing.T) {
t.Fatalf("Add failed %v", err)
}
if l := len(p.Tracked()); l != 2 {
t.Fatalf("got %d tracked; want 2", l)
}
time.Sleep(2 * time.Second)
// Check that the jobs were launched correctly.
for _, job := range []*structs.Job{job, job2} {
times, err := m.LaunchTimes(p, job.ID)
times, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q", job.ID)
}
if len(times) != 1 {
t.Fatalf("incorrect number of launch times for job %q; got %d; want 1", job.ID, len(times))
}
if times[0] != launch {
t.Fatalf("periodic dispatcher created eval for time %v; want %v", times[0], launch)
}
}
}
func TestPeriodicDispatch_Run_SameID_Different_Namespace(t *testing.T) {
t.Parallel()
p, m := testPeriodicDispatcher()
// Create two jobs with the same ID in different namespaces that will be launched at the same time.
launch := time.Now().Round(1 * time.Second).Add(1 * time.Second)
job := testPeriodicJob(launch)
job2 := testPeriodicJob(launch)
job2.ID = job.ID
job2.Namespace = "test"
// Add them.
if err := p.Add(job); err != nil {
t.Fatalf("Add failed %v", err)
}
if err := p.Add(job2); err != nil {
t.Fatalf("Add failed %v", err)
}
if l := len(p.Tracked()); l != 2 {
t.Fatalf("got %d tracked; want 2", l)
}
time.Sleep(2 * time.Second)
// Check that the jobs were launched correctly.
for _, job := range []*structs.Job{job, job2} {
times, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("failed to get launch times for job %q", job.ID)
}
@ -493,7 +566,7 @@ func TestPeriodicDispatch_Complex(t *testing.T) {
}
for _, job := range toDelete {
if err := p.Remove(job.ID); err != nil {
if err := p.Remove(job.Namespace, job.ID); err != nil {
t.Fatalf("Remove failed %v", err)
}
}
@ -501,9 +574,9 @@ func TestPeriodicDispatch_Complex(t *testing.T) {
time.Sleep(5 * time.Second)
actual := make(map[string][]time.Time, len(expected))
for _, job := range jobs {
launches, err := m.LaunchTimes(p, job.ID)
launches, err := m.LaunchTimes(p, job.Namespace, job.ID)
if err != nil {
t.Fatalf("LaunchTimes(%v) failed %v", job.ID, err)
t.Fatalf("LaunchTimes(%v, %v) failed %v", job.Namespace, job.ID, err)
}
actual[job.ID] = launches

View File

@ -1,7 +1,6 @@
package nomad
import (
"fmt"
"strings"
memdb "github.com/hashicorp/go-memdb"
@ -19,6 +18,11 @@ var (
// allContexts are the available contexts which are searched to find matches
// for a given prefix
allContexts = []structs.Context{structs.Allocs, structs.Jobs, structs.Nodes,
structs.Evals, structs.Deployments, structs.Namespaces}
// ossContexts are the oss contexts which are searched to find matches
// for a given prefix
ossContexts = []structs.Context{structs.Allocs, structs.Jobs, structs.Nodes,
structs.Evals, structs.Deployments}
)
@ -51,10 +55,15 @@ func (s *Search) getMatches(iter memdb.ResultIterator, prefix string) ([]string,
case *structs.Deployment:
id = raw.(*structs.Deployment).ID
default:
matchID, ok := getEnterpriseMatch(raw)
if !ok {
s.srv.logger.Printf("[ERR] nomad.resources: unexpected type for resources context: %T", t)
continue
}
id = matchID
}
if !strings.HasPrefix(id, prefix) {
continue
}
@ -67,20 +76,20 @@ func (s *Search) getMatches(iter memdb.ResultIterator, prefix string) ([]string,
// getResourceIter takes a context and returns a memdb iterator specific to
// that context
func getResourceIter(context structs.Context, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) {
func getResourceIter(context structs.Context, namespace, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) {
switch context {
case structs.Jobs:
return state.JobsByIDPrefix(ws, prefix)
return state.JobsByIDPrefix(ws, namespace, prefix)
case structs.Evals:
return state.EvalsByIDPrefix(ws, prefix)
return state.EvalsByIDPrefix(ws, namespace, prefix)
case structs.Allocs:
return state.AllocsByIDPrefix(ws, prefix)
return state.AllocsByIDPrefix(ws, namespace, prefix)
case structs.Nodes:
return state.NodesByIDPrefix(ws, prefix)
case structs.Deployments:
return state.DeploymentsByIDPrefix(ws, prefix)
return state.DeploymentsByIDPrefix(ws, namespace, prefix)
default:
return nil, fmt.Errorf("context must be one of %v or 'all' for all contexts; got %q", allContexts, context)
return getEnterpriseResourceIter(context, namespace, prefix, ws, state)
}
}
@ -122,7 +131,7 @@ func (s *Search) PrefixSearch(args *structs.SearchRequest,
}
for _, ctx := range contexts {
iter, err := getResourceIter(ctx, roundUUIDDownIfOdd(args.Prefix, args.Context), ws, state)
iter, err := getResourceIter(ctx, args.RequestNamespace(), roundUUIDDownIfOdd(args.Prefix, args.Context), ws, state)
if err != nil {
e := err.Error()

View File

@ -0,0 +1,24 @@
// +build !pro,!ent
package nomad
import (
"fmt"
memdb "github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/nomad/state"
"github.com/hashicorp/nomad/nomad/structs"
)
// getEnterpriseMatch is a no-op in oss since there are no enterprise objects.
func getEnterpriseMatch(match interface{}) (id string, ok bool) {
return "", false
}
// getEnterpriseResourceIter is used to retrieve an iterator over an enterprise
// only table.
func getEnterpriseResourceIter(context structs.Context, namespace, prefix string, ws memdb.WatchSet, state *state.StateStore) (memdb.ResultIterator, error) {
// If we have made it here then it is an error since we have exhausted all
// open source contexts.
return nil, fmt.Errorf("context must be one of %v or 'all' for all contexts; got %q", ossContexts, context)
}
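structs.Namespaces is listed in allContexts but not in ossContexts, so any context the switch in getResourceIter does not handle lands in this stub and is rejected on OSS builds, while pro/ent builds can register additional tables behind the same hook. A rough illustration of that fallthrough (values are illustrative; stateStore stands in for a *state.StateStore):

ws := memdb.NewWatchSet()
iter, err := getResourceIter(structs.Namespaces, structs.DefaultNamespace, "prod-", ws, stateStore)
// On an OSS binary: iter == nil and err lists the supported ossContexts.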

View File

@ -14,16 +14,15 @@ import (
const jobIndex = 1000
func registerAndVerifyJob(s *Server, t *testing.T, prefix string, counter int) string {
func registerAndVerifyJob(s *Server, t *testing.T, prefix string, counter int) *structs.Job {
job := mock.Job()
job.ID = prefix + strconv.Itoa(counter)
state := s.fsm.State()
if err := state.UpsertJob(jobIndex, job); err != nil {
t.Fatalf("err: %v", err)
}
return job.ID
return job
}
func TestSearch_PrefixSearch_Job(t *testing.T) {
@ -39,11 +38,15 @@ func TestSearch_PrefixSearch_Job(t *testing.T) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
jobID := registerAndVerifyJob(s, t, prefix, 0)
job := registerAndVerifyJob(s, t, prefix, 0)
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -52,7 +55,7 @@ func TestSearch_PrefixSearch_Job(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(uint64(jobIndex), resp.Index)
}
@ -70,9 +73,10 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
testutil.WaitForLeader(t, s.RPC)
// Register a job and an allocation
jobID := registerAndVerifyJob(s, t, prefix, 0)
job := registerAndVerifyJob(s, t, prefix, 0)
alloc := mock.Alloc()
alloc.JobID = jobID
alloc.JobID = job.ID
alloc.Namespace = job.Namespace
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
@ -86,6 +90,10 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
req := &structs.SearchRequest{
Prefix: "example-",
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -94,7 +102,7 @@ func TestSearch_PrefixSearch_All_JobWithHyphen(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.EqualValues(jobIndex, resp.Index)
}
@ -112,9 +120,9 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) {
testutil.WaitForLeader(t, s.RPC)
// Register a job and an allocation
jobID := registerAndVerifyJob(s, t, prefix, 0)
job := registerAndVerifyJob(s, t, prefix, 0)
alloc := mock.Alloc()
alloc.JobID = jobID
alloc.JobID = job.ID
summary := mock.JobSummary(alloc.JobID)
state := s.fsm.State()
@ -128,6 +136,10 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -136,7 +148,7 @@ func TestSearch_PrefixSearch_All_LongJob(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.EqualValues(jobIndex, resp.Index)
}
@ -154,13 +166,18 @@ func TestSearch_PrefixSearch_Truncate(t *testing.T) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
var job *structs.Job
for counter := 0; counter < 25; counter++ {
registerAndVerifyJob(s, t, prefix, counter)
job = registerAndVerifyJob(s, t, prefix, counter)
}
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -186,15 +203,19 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
jobID := registerAndVerifyJob(s, t, prefix, 0)
job := registerAndVerifyJob(s, t, prefix, 0)
eval1 := mock.Eval()
eval1.ID = jobID
eval1.ID = job.ID
s.fsm.State().UpsertEvals(2000, []*structs.Evaluation{eval1})
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -203,7 +224,7 @@ func TestSearch_PrefixSearch_AllWithJob(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(1, len(resp.Matches[structs.Evals]))
assert.Equal(eval1.ID, resp.Matches[structs.Evals][0])
@ -228,6 +249,10 @@ func TestSearch_PrefixSearch_Evals(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Evals,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp structs.SearchResponse
@ -269,6 +294,10 @@ func TestSearch_PrefixSearch_Allocation(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Allocs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: alloc.Namespace,
},
}
var resp structs.SearchResponse
@ -322,6 +351,10 @@ func TestSearch_PrefixSearch_All_UUID_EvenPrefix(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp structs.SearchResponse
@ -359,6 +392,10 @@ func TestSearch_PrefixSearch_Node(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Nodes,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SearchResponse
@ -392,6 +429,10 @@ func TestSearch_PrefixSearch_Deployment(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Deployments,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: deployment.Namespace,
},
}
var resp structs.SearchResponse
@ -435,6 +476,10 @@ func TestSearch_PrefixSearch_AllContext(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.All,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: eval1.Namespace,
},
}
var resp structs.SearchResponse
@ -466,11 +511,15 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
jobID := registerAndVerifyJob(s, t, prefix, 0)
job := registerAndVerifyJob(s, t, prefix, 0)
req := &structs.SearchRequest{
Prefix: "",
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -479,7 +528,7 @@ func TestSearch_PrefixSearch_NoPrefix(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
assert.Equal(uint64(jobIndex), resp.Index)
}
@ -502,6 +551,10 @@ func TestSearch_PrefixSearch_NoMatches(t *testing.T) {
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: structs.DefaultNamespace,
},
}
var resp structs.SearchResponse
@ -530,12 +583,16 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) {
codec := rpcClient(t, s)
testutil.WaitForLeader(t, s.RPC)
jobID1 := registerAndVerifyJob(s, t, id1, 0)
job := registerAndVerifyJob(s, t, id1, 0)
registerAndVerifyJob(s, t, id2, 50)
req := &structs.SearchRequest{
Prefix: prefix,
Context: structs.Jobs,
QueryOptions: structs.QueryOptions{
Region: "global",
Namespace: job.Namespace,
},
}
var resp structs.SearchResponse
@ -544,5 +601,5 @@ func TestSearch_PrefixSearch_RoundDownToEven(t *testing.T) {
}
assert.Equal(1, len(resp.Matches[structs.Jobs]))
assert.Equal(jobID1, resp.Matches[structs.Jobs][0])
assert.Equal(job.ID, resp.Matches[structs.Jobs][0])
}

View File

@ -187,6 +187,7 @@ type endpoints struct {
System *System
Operator *Operator
ACL *ACL
Enterprise *EnterpriseEndpoints
}
// NewServer is used to construct a new Nomad server from the
@ -736,6 +737,7 @@ func (s *Server) setupRPC(tlsWrap tlsutil.RegionWrapper) error {
s.endpoints.Status = &Status{s}
s.endpoints.System = &System{s}
s.endpoints.Search = &Search{s}
s.endpoints.Enterprise = NewEnterpriseEndpoints(s)
// Register the handlers
s.rpcServer.Register(s.endpoints.ACL)
@ -751,6 +753,7 @@ func (s *Server) setupRPC(tlsWrap tlsutil.RegionWrapper) error {
s.rpcServer.Register(s.endpoints.Status)
s.rpcServer.Register(s.endpoints.System)
s.rpcServer.Register(s.endpoints.Search)
s.endpoints.Enterprise.Register(s)
list, err := net.ListenTCP("tcp", s.config.RPCAddr)
if err != nil {

View File

@ -57,7 +57,7 @@ func testACLServer(t *testing.T, cb func(*Config)) (*Server, *structs.ACLToken)
func testServer(t *testing.T, cb func(*Config)) *Server {
// Setup the default settings
config := DefaultConfig()
config.Build = "unittest"
config.Build = "0.7.0+unittest"
config.DevMode = true
nodeNum := atomic.AddUint32(&nodeNumber, 1)
config.NodeName = fmt.Sprintf("nomad-%03d", nodeNum)

View File

@ -2,20 +2,35 @@ package state
import (
"fmt"
"sync"
"github.com/hashicorp/go-memdb"
"github.com/hashicorp/nomad/nomad/structs"
)
// stateStoreSchema is used to return the schema for the state store
func stateStoreSchema() *memdb.DBSchema {
// Create the root DB schema
db := &memdb.DBSchema{
Tables: make(map[string]*memdb.TableSchema),
}
var (
schemaFactories SchemaFactories
factoriesLock sync.Mutex
)
// Collect all the schemas that are needed
schemas := []func() *memdb.TableSchema{
// SchemaFactory is the factory method for returning a TableSchema
type SchemaFactory func() *memdb.TableSchema
type SchemaFactories []SchemaFactory
// RegisterSchemaFactories is used to register one or more table schema factories.
func RegisterSchemaFactories(factories ...SchemaFactory) {
factoriesLock.Lock()
defer factoriesLock.Unlock()
schemaFactories = append(schemaFactories, factories...)
}
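// GetFactories returns the registered schema factories.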
func GetFactories() SchemaFactories {
return schemaFactories
}
func init() {
// Register all schemas
RegisterSchemaFactories([]SchemaFactory{
indexTableSchema,
nodeTableSchema,
jobTableSchema,
@ -28,10 +43,18 @@ func stateStoreSchema() *memdb.DBSchema {
vaultAccessorTableSchema,
aclPolicyTableSchema,
aclTokenTableSchema,
}...)
}
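The factory list makes the state store schema extensible: any package compiled into the binary can contribute tables from its own init function, and stateStoreSchema picks them up through GetFactories. As a hedged sketch only (the namespaces table, its Name field, and the file it would live in are illustrative, not taken from this diff), an enterprise-only file could register an extra table like this:

func namespaceTableSchema() *memdb.TableSchema {
    return &memdb.TableSchema{
        Name: "namespaces",
        Indexes: map[string]*memdb.IndexSchema{
            "id": &memdb.IndexSchema{
                Name:         "id",
                AllowMissing: false,
                Unique:       true,
                // Hypothetical Namespace struct keyed by its Name field.
                Indexer: &memdb.StringFieldIndex{Field: "Name"},
            },
        },
    }
}

func init() {
    // Registered alongside the open source tables above.
    RegisterSchemaFactories(namespaceTableSchema)
}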
// stateStoreSchema is used to return the schema for the state store
func stateStoreSchema() *memdb.DBSchema {
// Create the root DB schema
db := &memdb.DBSchema{
Tables: make(map[string]*memdb.TableSchema),
}
// Add each of the tables
for _, schemaFn := range schemas {
for _, schemaFn := range GetFactories() {
schema := schemaFn()
if _, ok := db.Tables[schema.Name]; ok {
panic(fmt.Sprintf("duplicate table name: %s", schema.Name))
@ -88,14 +111,24 @@ func jobTableSchema() *memdb.TableSchema {
Indexes: map[string]*memdb.IndexSchema{
// Primary index is used for job management
// and simple direct lookup. ID is required to be
// unique.
// unique within a namespace.
"id": &memdb.IndexSchema{
Name: "id",
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
// Use a compound index so the tuple of (Namespace, ID) is
// uniquely identifying
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "ID",
Lowercase: true,
},
},
},
},
"type": &memdb.IndexSchema{
@ -136,9 +169,19 @@ func jobSummarySchema() *memdb.TableSchema {
Name: "id",
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
// Use a compound index so the tuple of (Namespace, JobID) is
// uniquely identifying
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "JobID",
Lowercase: true,
},
},
},
},
},
@ -156,16 +199,19 @@ func jobVersionSchema() *memdb.TableSchema {
AllowMissing: false,
Unique: true,
// Use a compound index so the tuple of (JobID, Version) is
// Use a compound index so the tuple of (Namespace, ID, Version) is
// uniquely identifying
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "ID",
Lowercase: true,
},
// Will need to create a new indexer
&memdb.UintFieldIndex{
Field: "Version",
},
@ -240,14 +286,33 @@ func deploymentSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
Name: "namespace",
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Namespace",
},
},
// Job index is used to lookup deployments by job
"job": &memdb.IndexSchema{
Name: "job",
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
// Use a compound index so the tuple of (Namespace, JobID) is
// uniquely identifying
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "JobID",
Lowercase: true,
},
},
},
},
},
@ -267,9 +332,19 @@ func periodicLaunchTableSchema() *memdb.TableSchema {
Name: "id",
AllowMissing: false,
Unique: true,
Indexer: &memdb.StringFieldIndex{
// Use a compound index so the tuple of (Namespace, JobID) is
// uniquely identifying
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "ID",
Lowercase: true,
},
},
},
},
},
@ -293,6 +368,15 @@ func evalTableSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
Name: "namespace",
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Namespace",
},
},
// Job index is used to lookup allocations by job
"job": &memdb.IndexSchema{
Name: "job",
@ -300,10 +384,15 @@ func evalTableSchema() *memdb.TableSchema {
Unique: false,
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "JobID",
Lowercase: true,
},
&memdb.StringFieldIndex{
Field: "Status",
Lowercase: true,
@ -332,6 +421,15 @@ func allocTableSchema() *memdb.TableSchema {
},
},
"namespace": &memdb.IndexSchema{
Name: "namespace",
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Field: "Namespace",
},
},
// Node index is used to lookup allocations by node
"node": &memdb.IndexSchema{
Name: "node",
@ -366,9 +464,17 @@ func allocTableSchema() *memdb.TableSchema {
Name: "job",
AllowMissing: false,
Unique: false,
Indexer: &memdb.StringFieldIndex{
Indexer: &memdb.CompoundIndex{
Indexes: []memdb.Indexer{
&memdb.StringFieldIndex{
Field: "Namespace",
},
&memdb.StringFieldIndex{
Field: "JobID",
Lowercase: true,
},
},
},
},

View File

@ -197,8 +197,13 @@ func (s *StateStore) UpsertJobSummary(index uint64, jobSummary *structs.JobSumma
txn := s.db.Txn(true)
defer txn.Abort()
// TODO(alex): Remove before releasing
if jobSummary.Namespace == "" {
panic("empty namespace")
}
// Check if the job summary already exists
existing, err := txn.First("job_summary", "id", jobSummary.JobID)
existing, err := txn.First("job_summary", "id", jobSummary.Namespace, jobSummary.JobID)
if err != nil {
return fmt.Errorf("job summary lookup failed: %v", err)
}
@ -228,12 +233,16 @@ func (s *StateStore) UpsertJobSummary(index uint64, jobSummary *structs.JobSumma
// DeleteJobSummary deletes the job summary with the given ID. This is for
// testing purposes only.
func (s *StateStore) DeleteJobSummary(index uint64, id string) error {
func (s *StateStore) DeleteJobSummary(index uint64, namespace, id string) error {
txn := s.db.Txn(true)
defer txn.Abort()
if namespace == "" {
panic("empty namespace")
}
// Delete the job summary
if _, err := txn.DeleteAll("job_summary", "id", id); err != nil {
if _, err := txn.DeleteAll("job_summary", "id", namespace, id); err != nil {
return fmt.Errorf("deleting job summary failed: %v", err)
}
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
@ -283,7 +292,7 @@ func (s *StateStore) upsertDeploymentImpl(index uint64, deployment *structs.Depl
// If the deployment is being marked as complete, set the job to stable.
if deployment.Status == structs.DeploymentStatusSuccessful {
if err := s.updateJobStabilityImpl(index, deployment.JobID, deployment.JobVersion, true, txn); err != nil {
if err := s.updateJobStabilityImpl(index, deployment.Namespace, deployment.JobID, deployment.JobVersion, true, txn); err != nil {
return fmt.Errorf("failed to update job stability: %v", err)
}
}
@ -304,7 +313,20 @@ func (s *StateStore) Deployments(ws memdb.WatchSet) (memdb.ResultIterator, error
return iter, nil
}
func (s *StateStore) DeploymentsByIDPrefix(ws memdb.WatchSet, deploymentID string) (memdb.ResultIterator, error) {
func (s *StateStore) DeploymentsByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
// Walk the deployments in the given namespace
iter, err := txn.Get("deployment", "namespace", namespace)
if err != nil {
return nil, err
}
ws.Add(iter.WatchCh())
return iter, nil
}
func (s *StateStore) DeploymentsByIDPrefix(ws memdb.WatchSet, namespace, deploymentID string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
// Walk the entire deployments table
@ -314,7 +336,23 @@ func (s *StateStore) DeploymentsByIDPrefix(ws memdb.WatchSet, deploymentID strin
}
ws.Add(iter.WatchCh())
return iter, nil
// Wrap the iterator in a filter
wrap := memdb.NewFilterIterator(iter, deploymentNamespaceFilter(namespace))
return wrap, nil
}
// deploymentNamespaceFilter returns a filter function that filters all
// deployments not in the given namespace.
func deploymentNamespaceFilter(namespace string) func(interface{}) bool {
return func(raw interface{}) bool {
d, ok := raw.(*structs.Deployment)
if !ok {
return true
}
return d.Namespace != namespace
}
}
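For reference, callers consume the wrapped iterator exactly like an unfiltered one; the filter simply skips deployments that belong to other namespaces. A minimal sketch, assuming the usual state package imports and a populated *StateStore (the prefix value is made up):

// listDeployments is illustrative only, not part of this change.
func listDeployments(state *StateStore) error {
    ws := memdb.NewWatchSet()
    iter, err := state.DeploymentsByIDPrefix(ws, structs.DefaultNamespace, "c3f4")
    if err != nil {
        return err
    }
    for raw := iter.Next(); raw != nil; raw = iter.Next() {
        d := raw.(*structs.Deployment)
        fmt.Printf("%s (%s)\n", d.ID, d.Namespace)
    }
    return nil
}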
func (s *StateStore) DeploymentByID(ws memdb.WatchSet, deploymentID string) (*structs.Deployment, error) {
@ -336,11 +374,15 @@ func (s *StateStore) deploymentByIDImpl(ws memdb.WatchSet, deploymentID string,
return nil, nil
}
func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, jobID string) ([]*structs.Deployment, error) {
func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, namespace, jobID string) ([]*structs.Deployment, error) {
txn := s.db.Txn(false)
if namespace == "" {
panic("empty namespace")
}
// Get an iterator over the deployments
iter, err := txn.Get("deployment", "job", jobID)
iter, err := txn.Get("deployment", "job", namespace, jobID)
if err != nil {
return nil, err
}
@ -363,11 +405,15 @@ func (s *StateStore) DeploymentsByJobID(ws memdb.WatchSet, jobID string) ([]*str
// LatestDeploymentByJobID returns the latest deployment for the given job. The
// latest is determined strictly by CreateIndex.
func (s *StateStore) LatestDeploymentByJobID(ws memdb.WatchSet, jobID string) (*structs.Deployment, error) {
func (s *StateStore) LatestDeploymentByJobID(ws memdb.WatchSet, namespace, jobID string) (*structs.Deployment, error) {
txn := s.db.Txn(false)
if namespace == "" {
panic("empty namespace")
}
// Get an iterator over the deployments
iter, err := txn.Get("deployment", "job", jobID)
iter, err := txn.Get("deployment", "job", namespace, jobID)
if err != nil {
return nil, err
}
@ -610,8 +656,11 @@ func (s *StateStore) UpsertJob(index uint64, job *structs.Job) error {
// upsertJobImpl is the implementation for registering a job or updating a job definition
func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion bool, txn *memdb.Txn) error {
if job.Namespace == "" {
panic("empty namespace")
}
// Check if the job already exists
existing, err := txn.First("jobs", "id", job.ID)
existing, err := txn.First("jobs", "id", job.Namespace, job.ID)
if err != nil {
return fmt.Errorf("job lookup failed: %v", err)
}
@ -646,7 +695,7 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b
}
// Have to get the job again since it could have been updated
updated, err := txn.First("jobs", "id", job.ID)
updated, err := txn.First("jobs", "id", job.Namespace, job.ID)
if err != nil {
return fmt.Errorf("job lookup failed: %v", err)
}
@ -679,12 +728,15 @@ func (s *StateStore) upsertJobImpl(index uint64, job *structs.Job, keepVersion b
}
// DeleteJob is used to deregister a job
func (s *StateStore) DeleteJob(index uint64, jobID string) error {
func (s *StateStore) DeleteJob(index uint64, namespace, jobID string) error {
txn := s.db.Txn(true)
defer txn.Abort()
if namespace == "" {
panic("empty namespace")
}
// Lookup the job
existing, err := txn.First("jobs", "id", jobID)
existing, err := txn.First("jobs", "id", namespace, jobID)
if err != nil {
return fmt.Errorf("job lookup failed: %v", err)
}
@ -695,7 +747,7 @@ func (s *StateStore) DeleteJob(index uint64, jobID string) error {
// Check if we should update a parent job summary
job := existing.(*structs.Job)
if job.ParentID != "" {
summaryRaw, err := txn.First("job_summary", "id", job.ParentID)
summaryRaw, err := txn.First("job_summary", "id", namespace, job.ParentID)
if err != nil {
return fmt.Errorf("unable to retrieve summary for parent job: %v", err)
}
@ -752,7 +804,7 @@ func (s *StateStore) DeleteJob(index uint64, jobID string) error {
}
// Delete the job summary
if _, err = txn.DeleteAll("job_summary", "id", jobID); err != nil {
if _, err = txn.DeleteAll("job_summary", "id", namespace, jobID); err != nil {
return fmt.Errorf("deleing job summary failed: %v", err)
}
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
@ -765,7 +817,10 @@ func (s *StateStore) DeleteJob(index uint64, jobID string) error {
// deleteJobVersions deletes all versions of the given job.
func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memdb.Txn) error {
iter, err := txn.Get("job_version", "id_prefix", job.ID)
if job.Namespace == "" {
panic("empty namespace")
}
iter, err := txn.Get("job_version", "id_prefix", job.Namespace, job.ID)
if err != nil {
return err
}
@ -782,12 +837,12 @@ func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memd
continue
}
if _, err = txn.DeleteAll("job_version", "id", j.ID, j.Version); err != nil {
if _, err = txn.DeleteAll("job_version", "id", j.Namespace, j.ID, j.Version); err != nil {
return fmt.Errorf("deleting job versions failed: %v", err)
}
}
if err := txn.Insert("index", &IndexEntry{"job_summary", index}); err != nil {
if err := txn.Insert("index", &IndexEntry{"job_version", index}); err != nil {
return fmt.Errorf("index update failed: %v", err)
}
@ -797,6 +852,9 @@ func (s *StateStore) deleteJobVersions(index uint64, job *structs.Job, txn *memd
// upsertJobVersion inserts a job into its historic version table and limits the
// number of job versions that are tracked.
func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *memdb.Txn) error {
if job.Namespace == "" {
panic("empty namespace")
}
// Insert the job
if err := txn.Insert("job_version", job); err != nil {
return fmt.Errorf("failed to insert job into job_version table: %v", err)
@ -807,7 +865,7 @@ func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *memdb
}
// Get all the historic jobs for this ID
all, err := s.jobVersionByID(txn, nil, job.ID)
all, err := s.jobVersionByID(txn, nil, job.Namespace, job.ID)
if err != nil {
return fmt.Errorf("failed to look up job versions for %q: %v", job.ID, err)
}
@ -845,10 +903,13 @@ func (s *StateStore) upsertJobVersion(index uint64, job *structs.Job, txn *memdb
// JobByID is used to lookup a job by its ID. JobByID returns the current/latest job
// version.
func (s *StateStore) JobByID(ws memdb.WatchSet, id string) (*structs.Job, error) {
func (s *StateStore) JobByID(ws memdb.WatchSet, namespace, id string) (*structs.Job, error) {
txn := s.db.Txn(false)
watchCh, existing, err := txn.FirstWatch("jobs", "id", id)
if namespace == "" {
panic("empty namespace")
}
watchCh, existing, err := txn.FirstWatch("jobs", "id", namespace, id)
if err != nil {
return nil, fmt.Errorf("job lookup failed: %v", err)
}
@ -861,10 +922,14 @@ func (s *StateStore) JobByID(ws memdb.WatchSet, id string) (*structs.Job, error)
}
// JobsByIDPrefix is used to lookup a job by prefix
func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
iter, err := txn.Get("jobs", "id_prefix", id)
if namespace == "" {
panic("empty namespace")
}
iter, err := txn.Get("jobs", "id_prefix", namespace, id)
if err != nil {
return nil, fmt.Errorf("job lookup failed: %v", err)
}
@ -875,17 +940,23 @@ func (s *StateStore) JobsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultI
}
// JobVersionsByID returns all the tracked versions of a job.
func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, id string) ([]*structs.Job, error) {
func (s *StateStore) JobVersionsByID(ws memdb.WatchSet, namespace, id string) ([]*structs.Job, error) {
txn := s.db.Txn(false)
return s.jobVersionByID(txn, &ws, id)
if namespace == "" {
panic("empty namespace")
}
return s.jobVersionByID(txn, &ws, namespace, id)
}
// jobVersionByID is the underlying implementation for retrieving all tracked
// versions of a job and is called under an existing transaction. A watch set
// can optionally be passed in to add the job histories to the watch set.
func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, id string) ([]*structs.Job, error) {
func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, namespace, id string) ([]*structs.Job, error) {
if namespace == "" {
panic("empty namespace")
}
// Get all the historic jobs for this ID
iter, err := txn.Get("job_version", "id_prefix", id)
iter, err := txn.Get("job_version", "id_prefix", namespace, id)
if err != nil {
return nil, err
}
@ -920,15 +991,23 @@ func (s *StateStore) jobVersionByID(txn *memdb.Txn, ws *memdb.WatchSet, id strin
// JobByIDAndVersion returns the job identified by its ID and Version. The
// passed watchset may be nil.
func (s *StateStore) JobByIDAndVersion(ws memdb.WatchSet, id string, version uint64) (*structs.Job, error) {
func (s *StateStore) JobByIDAndVersion(ws memdb.WatchSet, namespace, id string, version uint64) (*structs.Job, error) {
if namespace == "" {
panic("empty namespace")
}
txn := s.db.Txn(false)
return s.jobByIDAndVersionImpl(ws, id, version, txn)
return s.jobByIDAndVersionImpl(ws, namespace, id, version, txn)
}
// jobByIDAndVersionImpl returns the job identified by its ID and Version. The
// passed watchset may be nil.
func (s *StateStore) jobByIDAndVersionImpl(ws memdb.WatchSet, id string, version uint64, txn *memdb.Txn) (*structs.Job, error) {
watchCh, existing, err := txn.FirstWatch("job_version", "id", id, version)
func (s *StateStore) jobByIDAndVersionImpl(ws memdb.WatchSet, namespace, id string,
version uint64, txn *memdb.Txn) (*structs.Job, error) {
if namespace == "" {
panic("empty namespace")
}
watchCh, existing, err := txn.FirstWatch("job_version", "id", namespace, id, version)
if err != nil {
return nil, err
}
@ -973,6 +1052,25 @@ func (s *StateStore) Jobs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
return iter, nil
}
// JobsByNamespace returns an iterator over all the jobs for the given namespace
func (s *StateStore) JobsByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
return s.jobsByNamespaceImpl(ws, namespace, txn)
}
// jobsByNamespaceImpl returns an iterator over all the jobs for the given namespace
func (s *StateStore) jobsByNamespaceImpl(ws memdb.WatchSet, namespace string, txn *memdb.Txn) (memdb.ResultIterator, error) {
// Walk the entire jobs table
iter, err := txn.Get("jobs", "id_prefix", namespace, "")
if err != nil {
return nil, err
}
ws.Add(iter.WatchCh())
return iter, nil
}
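Because the jobs table now uses a compound (Namespace, ID) index, scanning the id_prefix index with a namespace and an empty ID prefix returns every job in that namespace; that is the trick jobsByNamespaceImpl relies on instead of a dedicated namespace index on the jobs table. A rough usage sketch (the helper name is hypothetical):

func jobIDsInNamespace(state *StateStore, namespace string) ([]string, error) {
    ws := memdb.NewWatchSet()
    iter, err := state.JobsByNamespace(ws, namespace)
    if err != nil {
        return nil, err
    }
    var ids []string
    for raw := iter.Next(); raw != nil; raw = iter.Next() {
        ids = append(ids, raw.(*structs.Job).ID)
    }
    return ids, nil
}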
// JobsByPeriodic returns an iterator over all the periodic or non-periodic jobs.
func (s *StateStore) JobsByPeriodic(ws memdb.WatchSet, periodic bool) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
@ -1019,10 +1117,13 @@ func (s *StateStore) JobsByGC(ws memdb.WatchSet, gc bool) (memdb.ResultIterator,
}
// JobSummary returns a job summary object which matches a specific id.
func (s *StateStore) JobSummaryByID(ws memdb.WatchSet, jobID string) (*structs.JobSummary, error) {
func (s *StateStore) JobSummaryByID(ws memdb.WatchSet, namespace, jobID string) (*structs.JobSummary, error) {
txn := s.db.Txn(false)
watchCh, existing, err := txn.FirstWatch("job_summary", "id", jobID)
if namespace == "" {
panic("empty namespace")
}
watchCh, existing, err := txn.FirstWatch("job_summary", "id", namespace, jobID)
if err != nil {
return nil, err
}
@ -1053,10 +1154,13 @@ func (s *StateStore) JobSummaries(ws memdb.WatchSet) (memdb.ResultIterator, erro
}
// JobSummaryByPrefix is used to look up Job Summary by id prefix
func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
func (s *StateStore) JobSummaryByPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
iter, err := txn.Get("job_summary", "id_prefix", id)
if namespace == "" {
panic("empty namespace")
}
iter, err := txn.Get("job_summary", "id_prefix", namespace, id)
if err != nil {
return nil, fmt.Errorf("eval lookup failed: %v", err)
}
@ -1071,8 +1175,11 @@ func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.Periodic
txn := s.db.Txn(true)
defer txn.Abort()
if launch.Namespace == "" {
panic("empty namespace")
}
// Check if the job already exists
existing, err := txn.First("periodic_launch", "id", launch.ID)
existing, err := txn.First("periodic_launch", "id", launch.Namespace, launch.ID)
if err != nil {
return fmt.Errorf("periodic launch lookup failed: %v", err)
}
@ -1099,12 +1206,15 @@ func (s *StateStore) UpsertPeriodicLaunch(index uint64, launch *structs.Periodic
}
// DeletePeriodicLaunch is used to delete the periodic launch
func (s *StateStore) DeletePeriodicLaunch(index uint64, jobID string) error {
func (s *StateStore) DeletePeriodicLaunch(index uint64, namespace, jobID string) error {
txn := s.db.Txn(true)
defer txn.Abort()
if namespace == "" {
panic("empty namespace")
}
// Lookup the launch
existing, err := txn.First("periodic_launch", "id", jobID)
existing, err := txn.First("periodic_launch", "id", namespace, jobID)
if err != nil {
return fmt.Errorf("launch lookup failed: %v", err)
}
@ -1126,10 +1236,13 @@ func (s *StateStore) DeletePeriodicLaunch(index uint64, jobID string) error {
// PeriodicLaunchByID is used to lookup a periodic launch by the periodic job
// ID.
func (s *StateStore) PeriodicLaunchByID(ws memdb.WatchSet, id string) (*structs.PeriodicLaunch, error) {
func (s *StateStore) PeriodicLaunchByID(ws memdb.WatchSet, namespace, id string) (*structs.PeriodicLaunch, error) {
txn := s.db.Txn(false)
watchCh, existing, err := txn.FirstWatch("periodic_launch", "id", id)
if namespace == "" {
panic("empty namespace")
}
watchCh, existing, err := txn.FirstWatch("periodic_launch", "id", namespace, id)
if err != nil {
return nil, fmt.Errorf("periodic launch lookup failed: %v", err)
}
@ -1163,13 +1276,17 @@ func (s *StateStore) UpsertEvals(index uint64, evals []*structs.Evaluation) erro
defer txn.Abort()
// Do a nested upsert
jobs := make(map[string]string, len(evals))
jobs := make(map[structs.NamespacedID]string, len(evals))
for _, eval := range evals {
if err := s.nestedUpsertEval(txn, index, eval); err != nil {
return err
}
jobs[eval.JobID] = ""
tuple := structs.NamespacedID{
ID: eval.JobID,
Namespace: eval.Namespace,
}
jobs[tuple] = ""
}
// Set the job's status
@ -1198,8 +1315,11 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
eval.ModifyIndex = index
}
if eval.Namespace == "" {
panic("empty namespace")
}
// Update the job summary
summaryRaw, err := txn.First("job_summary", "id", eval.JobID)
summaryRaw, err := txn.First("job_summary", "id", eval.Namespace, eval.JobID)
if err != nil {
return fmt.Errorf("job summary lookup failed: %v", err)
}
@ -1233,9 +1353,9 @@ func (s *StateStore) nestedUpsertEval(txn *memdb.Txn, index uint64, eval *struct
// Check if the job has any blocked evaluations and cancel them
if eval.Status == structs.EvalStatusComplete && len(eval.FailedTGAllocs) == 0 {
// Get the blocked evaluation for a job if it exists
iter, err := txn.Get("evals", "job", eval.JobID, structs.EvalStatusBlocked)
iter, err := txn.Get("evals", "job", eval.Namespace, eval.JobID, structs.EvalStatusBlocked)
if err != nil {
return fmt.Errorf("failed to get blocked evals for job %q: %v", eval.JobID, err)
return fmt.Errorf("failed to get blocked evals for job %q in namespace %q: %v", eval.JobID, eval.Namespace, err)
}
var blocked []*structs.Evaluation
@ -1274,7 +1394,7 @@ func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) e
txn := s.db.Txn(true)
defer txn.Abort()
jobs := make(map[string]string, len(evals))
jobs := make(map[structs.NamespacedID]string, len(evals))
for _, eval := range evals {
existing, err := txn.First("evals", "id", eval)
if err != nil {
@ -1286,8 +1406,13 @@ func (s *StateStore) DeleteEval(index uint64, evals []string, allocs []string) e
if err := txn.Delete("evals", existing); err != nil {
return fmt.Errorf("eval delete failed: %v", err)
}
jobID := existing.(*structs.Evaluation).JobID
jobs[jobID] = ""
eval := existing.(*structs.Evaluation)
tuple := structs.NamespacedID{
ID: eval.JobID,
Namespace: eval.Namespace,
}
jobs[tuple] = ""
}
for _, alloc := range allocs {
@ -1337,10 +1462,12 @@ func (s *StateStore) EvalByID(ws memdb.WatchSet, id string) (*structs.Evaluation
return nil, nil
}
// EvalsByIDPrefix is used to lookup evaluations by prefix
func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
// EvalsByIDPrefix is used to lookup evaluations by prefix in a particular
// namespace
func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
// Get an iterator over all evals by the id prefix
iter, err := txn.Get("evals", "id_prefix", id)
if err != nil {
return nil, fmt.Errorf("eval lookup failed: %v", err)
@ -1348,15 +1475,33 @@ func (s *StateStore) EvalsByIDPrefix(ws memdb.WatchSet, id string) (memdb.Result
ws.Add(iter.WatchCh())
return iter, nil
// Wrap the iterator in a filter
wrap := memdb.NewFilterIterator(iter, evalNamespaceFilter(namespace))
return wrap, nil
}
// evalNamespaceFilter returns a filter function that filters all evaluations
// not in the given namespace.
func evalNamespaceFilter(namespace string) func(interface{}) bool {
return func(raw interface{}) bool {
eval, ok := raw.(*structs.Evaluation)
if !ok {
return true
}
return eval.Namespace != namespace
}
}
// EvalsByJob returns all the evaluations by job id
func (s *StateStore) EvalsByJob(ws memdb.WatchSet, jobID string) ([]*structs.Evaluation, error) {
func (s *StateStore) EvalsByJob(ws memdb.WatchSet, namespace, jobID string) ([]*structs.Evaluation, error) {
txn := s.db.Txn(false)
if namespace == "" {
panic("empty namespace")
}
// Get an iterator over the node allocations
iter, err := txn.Get("evals", "job_prefix", jobID)
iter, err := txn.Get("evals", "job_prefix", namespace, jobID)
if err != nil {
return nil, err
}
@ -1397,6 +1542,22 @@ func (s *StateStore) Evals(ws memdb.WatchSet) (memdb.ResultIterator, error) {
return iter, nil
}
// EvalsByNamespace returns an iterator over all the evaluations in the given
// namespace
func (s *StateStore) EvalsByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
// Walk the evals in the given namespace
iter, err := txn.Get("evals", "namespace", namespace)
if err != nil {
return nil, err
}
ws.Add(iter.WatchCh())
return iter, nil
}
// UpdateAllocsFromClient is used to update an allocation based on input
// from a client. While the schedulers are the authority on the allocation for
// most things, some updates are authoritative from the client. Specifically,
@ -1448,7 +1609,6 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, index uint64, a
// Update the modify index
copyAlloc.ModifyIndex = index
// TODO TEST
if err := s.updateDeploymentWithAlloc(index, copyAlloc, exist, txn); err != nil {
return fmt.Errorf("error updating deployment: %v", err)
}
@ -1467,7 +1627,13 @@ func (s *StateStore) nestedUpdateAllocFromClient(txn *memdb.Txn, index uint64, a
if !copyAlloc.TerminalStatus() {
forceStatus = structs.JobStatusRunning
}
jobs := map[string]string{exist.JobID: forceStatus}
tuple := structs.NamespacedID{
ID: exist.JobID,
Namespace: exist.Namespace,
}
jobs := map[structs.NamespacedID]string{tuple: forceStatus}
if err := s.setJobStatuses(index, txn, jobs, false); err != nil {
return fmt.Errorf("setting job status failed: %v", err)
}
@ -1490,7 +1656,7 @@ func (s *StateStore) UpsertAllocs(index uint64, allocs []*structs.Allocation) er
// used with an existing transaction.
func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation, txn *memdb.Txn) error {
// Handle the allocations
jobs := make(map[string]string, 1)
jobs := make(map[structs.NamespacedID]string, 1)
for _, alloc := range allocs {
existing, err := txn.First("allocs", "id", alloc.ID)
if err != nil {
@ -1561,7 +1727,12 @@ func (s *StateStore) upsertAllocsImpl(index uint64, allocs []*structs.Allocation
if !alloc.TerminalStatus() {
forceStatus = structs.JobStatusRunning
}
jobs[alloc.JobID] = forceStatus
tuple := structs.NamespacedID{
ID: alloc.JobID,
Namespace: alloc.Namespace,
}
jobs[tuple] = forceStatus
}
// Update the indexes
@ -1595,7 +1766,7 @@ func (s *StateStore) AllocByID(ws memdb.WatchSet, id string) (*structs.Allocatio
}
// AllocsByIDPrefix is used to lookup allocs by prefix
func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, id string) (memdb.ResultIterator, error) {
func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, namespace, id string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
iter, err := txn.Get("allocs", "id_prefix", id)
@ -1605,7 +1776,22 @@ func (s *StateStore) AllocsByIDPrefix(ws memdb.WatchSet, id string) (memdb.Resul
ws.Add(iter.WatchCh())
return iter, nil
// Wrap the iterator in a filter
wrap := memdb.NewFilterIterator(iter, allocNamespaceFilter(namespace))
return wrap, nil
}
// allocNamespaceFilter returns a filter function that filters all allocations
// not in the given namespace.
func allocNamespaceFilter(namespace string) func(interface{}) bool {
return func(raw interface{}) bool {
alloc, ok := raw.(*structs.Allocation)
if !ok {
return true
}
return alloc.Namespace != namespace
}
}
// AllocsByNode returns all the allocations by node
@ -1656,12 +1842,15 @@ func (s *StateStore) AllocsByNodeTerminal(ws memdb.WatchSet, node string, termin
}
// AllocsByJob returns all the allocations by job id
func (s *StateStore) AllocsByJob(ws memdb.WatchSet, jobID string, all bool) ([]*structs.Allocation, error) {
func (s *StateStore) AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Allocation, error) {
txn := s.db.Txn(false)
if namespace == "" {
panic("empty namespace")
}
// Get the job
var job *structs.Job
rawJob, err := txn.First("jobs", "id", jobID)
rawJob, err := txn.First("jobs", "id", namespace, jobID)
if err != nil {
return nil, err
}
@ -1670,7 +1859,7 @@ func (s *StateStore) AllocsByJob(ws memdb.WatchSet, jobID string, all bool) ([]*
}
// Get an iterator over the node allocations
iter, err := txn.Get("allocs", "job", jobID)
iter, err := txn.Get("allocs", "job", namespace, jobID)
if err != nil {
return nil, err
}
@ -1757,6 +1946,22 @@ func (s *StateStore) Allocs(ws memdb.WatchSet) (memdb.ResultIterator, error) {
return iter, nil
}
// AllocsByNamespace returns an iterator over all the allocations in the
// namespace
func (s *StateStore) AllocsByNamespace(ws memdb.WatchSet, namespace string) (memdb.ResultIterator, error) {
txn := s.db.Txn(false)
// Walk the allocs in the given namespace
iter, err := txn.Get("allocs", "namespace", namespace)
if err != nil {
return nil, err
}
ws.Add(iter.WatchCh())
return iter, nil
}
// UpsertVaultAccessors is used to register a set of Vault Accessors
func (s *StateStore) UpsertVaultAccessor(index uint64, accessors []*structs.VaultAccessor) error {
txn := s.db.Txn(true)
@ -1935,10 +2140,13 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.Deploym
if err := txn.Insert("index", &IndexEntry{"deployment", index}); err != nil {
return fmt.Errorf("index update failed: %v", err)
}
if copy.Namespace == "" {
panic("empty namespace")
}
// If the deployment is being marked as complete, set the job to stable.
if copy.Status == structs.DeploymentStatusSuccessful {
if err := s.updateJobStabilityImpl(index, copy.JobID, copy.JobVersion, true, txn); err != nil {
if err := s.updateJobStabilityImpl(index, copy.Namespace, copy.JobID, copy.JobVersion, true, txn); err != nil {
return fmt.Errorf("failed to update job stability: %v", err)
}
}
@ -1948,11 +2156,14 @@ func (s *StateStore) updateDeploymentStatusImpl(index uint64, u *structs.Deploym
// UpdateJobStability updates the stability of the given job and version to the
// desired status.
func (s *StateStore) UpdateJobStability(index uint64, jobID string, jobVersion uint64, stable bool) error {
func (s *StateStore) UpdateJobStability(index uint64, namespace, jobID string, jobVersion uint64, stable bool) error {
txn := s.db.Txn(true)
defer txn.Abort()
if namespace == "" {
panic("empty namespace")
}
if err := s.updateJobStabilityImpl(index, jobID, jobVersion, stable, txn); err != nil {
if err := s.updateJobStabilityImpl(index, namespace, jobID, jobVersion, stable, txn); err != nil {
return err
}
@ -1961,9 +2172,12 @@ func (s *StateStore) UpdateJobStability(index uint64, jobID string, jobVersion u
}
// updateJobStabilityImpl updates the stability of the given job and version
func (s *StateStore) updateJobStabilityImpl(index uint64, jobID string, jobVersion uint64, stable bool, txn *memdb.Txn) error {
func (s *StateStore) updateJobStabilityImpl(index uint64, namespace, jobID string, jobVersion uint64, stable bool, txn *memdb.Txn) error {
if namespace == "" {
panic("empty namespace")
}
// Get the job that is referenced
job, err := s.jobByIDAndVersionImpl(nil, jobID, jobVersion, txn)
job, err := s.jobByIDAndVersionImpl(nil, namespace, jobID, jobVersion, txn)
if err != nil {
return err
}
@ -2269,14 +2483,18 @@ func (s *StateStore) ReconcileJobSummaries(index uint64) error {
// Create a job summary for the job
summary := &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: make(map[string]structs.TaskGroupSummary),
}
for _, tg := range job.TaskGroups {
summary.Summary[tg.Name] = structs.TaskGroupSummary{}
}
if job.Namespace == "" {
panic("empty namespace")
}
// Find all the allocations for the jobs
iterAllocs, err := txn.Get("allocs", "job", job.ID)
iterAllocs, err := txn.Get("allocs", "job", job.Namespace, job.ID)
if err != nil {
return err
}
@ -2336,9 +2554,12 @@ func (s *StateStore) ReconcileJobSummaries(index uint64) error {
// It takes a map of namespaced job IDs to an optional forceStatus string. It returns an
// error if the job doesn't exist or setJobStatus fails.
func (s *StateStore) setJobStatuses(index uint64, txn *memdb.Txn,
jobs map[string]string, evalDelete bool) error {
for job, forceStatus := range jobs {
existing, err := txn.First("jobs", "id", job)
jobs map[structs.NamespacedID]string, evalDelete bool) error {
for tuple, forceStatus := range jobs {
if tuple.Namespace == "" {
panic("empty namespace")
}
existing, err := txn.First("jobs", "id", tuple.Namespace, tuple.ID)
if err != nil {
return fmt.Errorf("job lookup failed: %v", err)
}
@ -2397,10 +2618,14 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
return fmt.Errorf("index update failed: %v", err)
}
if updated.Namespace == "" {
panic("empty namespace")
}
// Update the children summary
if updated.ParentID != "" {
// Try to update the summary of the parent job summary
summaryRaw, err := txn.First("job_summary", "id", updated.ParentID)
summaryRaw, err := txn.First("job_summary", "id", updated.Namespace, updated.ParentID)
if err != nil {
return fmt.Errorf("unable to retrieve summary for parent job: %v", err)
}
@ -2460,7 +2685,10 @@ func (s *StateStore) setJobStatus(index uint64, txn *memdb.Txn,
}
func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete bool) (string, error) {
allocs, err := txn.Get("allocs", "job", job.ID)
if job.Namespace == "" {
panic("empty namespace")
}
allocs, err := txn.Get("allocs", "job", job.Namespace, job.ID)
if err != nil {
return "", err
}
@ -2474,7 +2702,7 @@ func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete b
}
}
evals, err := txn.Get("evals", "job_prefix", job.ID)
evals, err := txn.Get("evals", "job_prefix", job.Namespace, job.ID)
if err != nil {
return "", err
}
@ -2529,8 +2757,12 @@ func (s *StateStore) getJobStatus(txn *memdb.Txn, job *structs.Job, evalDelete b
func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
txn *memdb.Txn) error {
if job.Namespace == "" {
panic("empty namespace")
}
// Update the job summary
summaryRaw, err := txn.First("job_summary", "id", job.ID)
summaryRaw, err := txn.First("job_summary", "id", job.Namespace, job.ID)
if err != nil {
return fmt.Errorf("job summary lookup failed: %v", err)
}
@ -2543,6 +2775,7 @@ func (s *StateStore) updateSummaryWithJob(index uint64, job *structs.Job,
} else {
summary = &structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: make(map[string]structs.TaskGroupSummary),
Children: new(structs.JobChildrenSummary),
CreateIndex: index,
@ -2667,15 +2900,18 @@ func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocat
if alloc.Job == nil {
return nil
}
if alloc.Namespace == "" {
panic("empty namespace")
}
summaryRaw, err := txn.First("job_summary", "id", alloc.JobID)
summaryRaw, err := txn.First("job_summary", "id", alloc.Namespace, alloc.JobID)
if err != nil {
return fmt.Errorf("unable to lookup job summary for job id %q: %v", alloc.JobID, err)
return fmt.Errorf("unable to lookup job summary for job id %q in namespace %q: %v", alloc.JobID, alloc.Namespace, err)
}
if summaryRaw == nil {
// Check if the job is de-registered
rawJob, err := txn.First("jobs", "id", alloc.JobID)
rawJob, err := txn.First("jobs", "id", alloc.Namespace, alloc.JobID)
if err != nil {
return fmt.Errorf("unable to query job: %v", err)
}
@ -2685,7 +2921,7 @@ func (s *StateStore) updateSummaryWithAlloc(index uint64, alloc *structs.Allocat
return nil
}
return fmt.Errorf("job summary for job %q is not present", alloc.JobID)
return fmt.Errorf("job summary for job %q in namespace %q is not present", alloc.JobID, alloc.Namespace)
}
// Get a copy of the existing summary

File diff suppressed because it is too large

View File

@ -113,6 +113,7 @@ const (
// DefaultNamespace is the default namespace.
DefaultNamespace = "default"
DefaultNamespaceDescription = "Default shared namespace"
)
// Context defines the scope in which a search for Nomad object operates, and
@ -125,9 +126,16 @@ const (
Evals Context = "evals"
Jobs Context = "jobs"
Nodes Context = "nodes"
Namespaces Context = "namespaces"
All Context = "all"
)
// NamespacedID is a tuple of an ID and a namespace
type NamespacedID struct {
ID string
Namespace string
}
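Since NamespacedID is a struct of two comparable strings, it can be used directly as a map key; this is how the state store now tracks which jobs a transaction touched. A tiny illustration (values are made up):

// Inside package structs; illustration only.
jobs := map[NamespacedID]string{}
jobs[NamespacedID{ID: "example-job", Namespace: DefaultNamespace}] = JobStatusRunning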
// RPCInfo is used to describe common information about query
type RPCInfo interface {
RequestRegion() string
@ -140,6 +148,9 @@ type QueryOptions struct {
// The target region for this query
Region string
// Namespace is the target namespace for the query.
Namespace string
// If set, wait until query exceeds given index. Must be provided
// with MaxQueryTime.
MinQueryIndex uint64
@ -162,6 +173,13 @@ func (q QueryOptions) RequestRegion() string {
return q.Region
}
func (q QueryOptions) RequestNamespace() string {
if q.Namespace == "" {
return DefaultNamespace
}
return q.Namespace
}
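RequestNamespace falls back to DefaultNamespace, so requests from clients that never set the field still resolve to the default namespace. A quick illustration (fmt import assumed):

// Illustration only.
q := QueryOptions{Region: "global"}
fmt.Println(q.RequestNamespace()) // prints "default"

q.Namespace = "dev"
fmt.Println(q.RequestNamespace()) // prints "dev"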
// QueryOption only applies to reads, so always true
func (q QueryOptions) IsRead() bool {
return true
@ -175,6 +193,9 @@ type WriteRequest struct {
// The target region for this write
Region string
// Namespace is the target namespace for the write.
Namespace string
// SecretID is secret portion of the ACL token used for the request
SecretID string
}
@ -184,6 +205,13 @@ func (w WriteRequest) RequestRegion() string {
return w.Region
}
func (w WriteRequest) RequestNamespace() string {
if w.Namespace == "" {
return DefaultNamespace
}
return w.Namespace
}
// WriteRequest only applies to writes, always false
func (w WriteRequest) IsRead() bool {
return false
@ -1434,6 +1462,9 @@ type Job struct {
// Region is the Nomad region that handles scheduling this job
Region string
// Namespace is the namespace the job is submitted into.
Namespace string
// ID is a unique identifier for the job per region. It can be
// specified hierarchically like LineOfBiz/OrgName/Team/Project
ID string
@ -1522,6 +1553,10 @@ type Job struct {
// when registering a Job. A set of warnings are returned if the job was changed
// in anyway that the user should be made aware of.
func (j *Job) Canonicalize() (warnings error) {
if j == nil {
return nil
}
var mErr multierror.Error
// Ensure that an empty and nil map are treated the same to avoid scheduling
// problems since we use reflect DeepEquals.
@ -1529,6 +1564,11 @@ func (j *Job) Canonicalize() (warnings error) {
j.Meta = nil
}
// Ensure the job is in a namespace.
if j.Namespace == "" {
j.Namespace = DefaultNamespace
}
for _, tg := range j.TaskGroups {
tg.Canonicalize(j)
}
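The same defaulting applies when a job is canonicalized, so a spec submitted without a namespace lands in the default namespace. Roughly (illustration only, warnings ignored):

j := &Job{ID: "example", Name: "example", Type: JobTypeService}
j.Canonicalize()
// j.Namespace is now DefaultNamespace ("default")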
@ -1663,6 +1703,9 @@ func (j *Job) Validate() error {
if j.Name == "" {
mErr.Errors = append(mErr.Errors, errors.New("Missing job name"))
}
if j.Namespace == "" {
mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace"))
}
switch j.Type {
case JobTypeCore, JobTypeService, JobTypeBatch, JobTypeSystem:
case "":
@ -1969,8 +2012,12 @@ type JobListStub struct {
// JobSummary summarizes the state of the allocations of a job
type JobSummary struct {
// JobID is the ID of the job the summary is for
JobID string
// Namespace is the namespace of the job and its summary
Namespace string
// Summary contains the summary per task group for the Job
Summary map[string]TaskGroupSummary
@ -2280,6 +2327,7 @@ const (
// PeriodicLaunch tracks the last launch time of a periodic job.
type PeriodicLaunch struct {
ID string // ID of the periodic job.
Namespace string // Namespace of the periodic job
Launch time.Time // The last launch time.
// Raft Indexes
@ -4199,6 +4247,9 @@ type Deployment struct {
// ID is a generated UUID for the deployment
ID string
// Namespace is the namespace the deployment is created in
Namespace string
// JobID is the job the deployment is created for
JobID string
@ -4232,6 +4283,7 @@ type Deployment struct {
func NewDeployment(job *Job) *Deployment {
return &Deployment{
ID: GenerateUUID(),
Namespace: job.Namespace,
JobID: job.ID,
JobVersion: job.Version,
JobModifyIndex: job.ModifyIndex,
@ -4393,6 +4445,9 @@ type Allocation struct {
// ID of the allocation (UUID)
ID string
// Namespace is the namespace the allocation is created in
Namespace string
// ID of the evaluation that generated this allocation
EvalID string
@ -4843,6 +4898,9 @@ type Evaluation struct {
// is assigned upon the creation of the evaluation.
ID string
// Namespace is the namespace the evaluation is created in
Namespace string
// Priority is used to control scheduling importance and if this job
// can preempt other jobs.
Priority int
@ -4939,7 +4997,7 @@ func (e *Evaluation) TerminalStatus() bool {
}
func (e *Evaluation) GoString() string {
return fmt.Sprintf("<Eval '%s' JobID: '%s'>", e.ID, e.JobID)
return fmt.Sprintf("<Eval %q JobID: %q Namespace: %q>", e.ID, e.JobID, e.Namespace)
}
func (e *Evaluation) Copy() *Evaluation {
@ -5025,6 +5083,7 @@ func (e *Evaluation) MakePlan(j *Job) *Plan {
func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
return &Evaluation{
ID: GenerateUUID(),
Namespace: e.Namespace,
Priority: e.Priority,
Type: e.Type,
TriggeredBy: EvalTriggerRollingUpdate,
@ -5042,6 +5101,7 @@ func (e *Evaluation) NextRollingEval(wait time.Duration) *Evaluation {
func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped bool) *Evaluation {
return &Evaluation{
ID: GenerateUUID(),
Namespace: e.Namespace,
Priority: e.Priority,
Type: e.Type,
TriggeredBy: e.TriggeredBy,
@ -5060,6 +5120,7 @@ func (e *Evaluation) CreateBlockedEval(classEligibility map[string]bool, escaped
func (e *Evaluation) CreateFailedFollowUpEval(wait time.Duration) *Evaluation {
return &Evaluation{
ID: GenerateUUID(),
Namespace: e.Namespace,
Priority: e.Priority,
Type: e.Type,
TriggeredBy: EvalTriggerFailedFollowUp,

View File

@ -26,16 +26,19 @@ func TestJob_Validate(t *testing.T) {
if !strings.Contains(mErr.Errors[2].Error(), "job name") {
t.Fatalf("err: %s", err)
}
if !strings.Contains(mErr.Errors[3].Error(), "job type") {
if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
t.Fatalf("err: %s", err)
}
if !strings.Contains(mErr.Errors[4].Error(), "priority") {
if !strings.Contains(mErr.Errors[4].Error(), "job type") {
t.Fatalf("err: %s", err)
}
if !strings.Contains(mErr.Errors[5].Error(), "datacenters") {
if !strings.Contains(mErr.Errors[5].Error(), "priority") {
t.Fatalf("err: %s", err)
}
if !strings.Contains(mErr.Errors[6].Error(), "task groups") {
if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
t.Fatalf("err: %s", err)
}
if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
t.Fatalf("err: %s", err)
}
@ -62,6 +65,7 @@ func TestJob_Validate(t *testing.T) {
j = &Job{
Region: "global",
ID: GenerateUUID(),
Namespace: "test",
Name: "my-job",
Type: JobTypeService,
Priority: 50,
@ -161,6 +165,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
Name: "One task group",
Warnings: []string{"conversion to new update stanza"},
Job: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 2,
@ -174,6 +179,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 2,
@ -202,6 +208,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
Name: "One task group batch",
Warnings: []string{"Update stanza is disallowed for batch jobs"},
Job: &Job{
Namespace: "test",
Type: JobTypeBatch,
Update: UpdateStrategy{
MaxParallel: 2,
@ -215,6 +222,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeBatch,
Update: UpdateStrategy{},
TaskGroups: []*TaskGroup{
@ -231,6 +239,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
Name: "One task group batch - new spec",
Warnings: []string{"Update stanza is disallowed for batch jobs"},
Job: &Job{
Namespace: "test",
Type: JobTypeBatch,
Update: UpdateStrategy{
Stagger: 2 * time.Second,
@ -256,6 +265,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeBatch,
Update: UpdateStrategy{},
TaskGroups: []*TaskGroup{
@ -271,6 +281,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
{
Name: "One task group service - new spec",
Job: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
Stagger: 2 * time.Second,
@ -296,6 +307,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
Stagger: 2 * time.Second,
@ -327,6 +339,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
Name: "One task group; too high of parallelism",
Warnings: []string{"conversion to new update stanza"},
Job: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 200,
@ -340,6 +353,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 200,
@ -368,6 +382,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
Name: "Multiple task group; rounding",
Warnings: []string{"conversion to new update stanza"},
Job: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 2,
@ -389,6 +404,7 @@ func TestJob_Canonicalize_Update(t *testing.T) {
},
},
Expected: &Job{
Namespace: "test",
Type: JobTypeService,
Update: UpdateStrategy{
MaxParallel: 2,
@ -515,6 +531,7 @@ func testJob() *Job {
return &Job{
Region: "global",
ID: GenerateUUID(),
Namespace: "test",
Name: "my-job",
Type: JobTypeService,
Priority: 50,

View File

@ -49,7 +49,7 @@ func TestSystemEndpoint_GarbageCollect(t *testing.T) {
testutil.WaitForResult(func() (bool, error) {
// Check if the job has been GC'd
ws := memdb.NewWatchSet()
exist, err := state.JobByID(ws, job.ID)
exist, err := state.JobByID(ws, job.Namespace, job.ID)
if err != nil {
return false, err
}
@ -78,7 +78,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) {
}
// Delete the job summary
state.DeleteJobSummary(1001, job.ID)
state.DeleteJobSummary(1001, job.Namespace, job.ID)
// Make the GC request
req := &structs.GenericRequest{
@ -94,7 +94,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) {
testutil.WaitForResult(func() (bool, error) {
// Check if Nomad has reconciled the summary for the job
ws := memdb.NewWatchSet()
summary, err := state.JobSummaryByID(ws, job.ID)
summary, err := state.JobSummaryByID(ws, job.Namespace, job.ID)
if err != nil {
return false, err
}
@ -106,6 +106,7 @@ func TestSystemEndpoint_ReconcileSummaries(t *testing.T) {
// the output so that we can do deep equal
expectedSummary := structs.JobSummary{
JobID: job.ID,
Namespace: job.Namespace,
Summary: map[string]structs.TaskGroupSummary{
"web": structs.TaskGroupSummary{
Queued: 10,

View File

@ -9,6 +9,7 @@ import (
"runtime"
"strconv"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/serf/serf"
)
@ -42,7 +43,9 @@ type serverParts struct {
Expect int
MajorVersion int
MinorVersion int
Build version.Version
Addr net.Addr
Status serf.MemberStatus
}
func (s *serverParts) String() string {
@ -77,6 +80,11 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
return false, nil
}
build_version, err := version.NewVersion(m.Tags["build"])
if err != nil {
return false, nil
}
// The "vsn" tag was Version, which is now the MajorVersion number.
majorVersionStr := m.Tags["vsn"]
majorVersion, err := strconv.Atoi(majorVersionStr)
@ -103,10 +111,26 @@ func isNomadServer(m serf.Member) (bool, *serverParts) {
Addr: addr,
MajorVersion: majorVersion,
MinorVersion: minorVersion,
Build: *build_version,
Status: m.Status,
}
return true, parts
}
// ServersMeetMinimumVersion returns whether the given alive servers are at least on the
// given Nomad version
func ServersMeetMinimumVersion(members []serf.Member, minVersion *version.Version) bool {
for _, member := range members {
if valid, parts := isNomadServer(member); valid && parts.Status == serf.StatusAlive {
if parts.Build.LessThan(minVersion) {
return false
}
}
}
return true
}
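This check lets the leader hold back namespace-aware behavior until every live server in the cluster has been upgraded. A hedged sketch of a call site (the 0.7.0 cutoff and the s.serf.Members() accessor are assumptions, not taken from this diff):

// Illustrative only.
minVersion := version.Must(version.NewVersion("0.7.0"))
if !ServersMeetMinimumVersion(s.serf.Members(), minVersion) {
    return fmt.Errorf("all servers must be running Nomad %s or newer", minVersion)
}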
// shuffleStrings randomly shuffles the list of strings
func shuffleStrings(list []string) {
for i := range list {

View File

@ -5,6 +5,7 @@ import (
"reflect"
"testing"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/serf/serf"
)
@ -14,12 +15,14 @@ func TestIsNomadServer(t *testing.T) {
m := serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Status: serf.StatusAlive,
Tags: map[string]string{
"role": "nomad",
"region": "aws",
"dc": "east-aws",
"port": "10000",
"vsn": "1",
"build": "0.7.0+ent",
},
}
valid, parts := isNomadServer(m)
@ -36,6 +39,14 @@ func TestIsNomadServer(t *testing.T) {
if parts.Expect != 0 {
t.Fatalf("bad: %v", parts.Expect)
}
if parts.Status != serf.StatusAlive {
t.Fatalf("bad: %v", parts.Status)
}
if seg := parts.Build.Segments(); len(seg) != 3 {
t.Fatalf("bad: %v", parts.Build)
} else if seg[0] != 0 || seg[1] != 7 || seg[2] != 0 {
t.Fatalf("bad: %v", parts.Build)
}
m.Tags["bootstrap"] = "1"
valid, parts = isNomadServer(m)
@ -57,6 +68,89 @@ func TestIsNomadServer(t *testing.T) {
}
}
func TestServersMeetMinimumVersion(t *testing.T) {
t.Parallel()
makeMember := func(version string) serf.Member {
return serf.Member{
Name: "foo",
Addr: net.IP([]byte{127, 0, 0, 1}),
Tags: map[string]string{
"role": "nomad",
"region": "aws",
"dc": "east-aws",
"port": "10000",
"build": version,
"vsn": "1",
},
Status: serf.StatusAlive,
}
}
cases := []struct {
members []serf.Member
ver *version.Version
expected bool
}{
// One server, meets reqs
{
members: []serf.Member{
makeMember("0.7.5"),
},
ver: version.Must(version.NewVersion("0.7.5")),
expected: true,
},
// One server in dev, meets reqs
{
members: []serf.Member{
makeMember("0.8.5-dev"),
},
ver: version.Must(version.NewVersion("0.7.5")),
expected: true,
},
// One server with meta, meets reqs
{
members: []serf.Member{
makeMember("0.7.5+ent"),
},
ver: version.Must(version.NewVersion("0.7.5")),
expected: true,
},
// One server, doesn't meet reqs
{
members: []serf.Member{
makeMember("0.7.5"),
},
ver: version.Must(version.NewVersion("0.8.0")),
expected: false,
},
// Multiple servers, meets req version
{
members: []serf.Member{
makeMember("0.7.5"),
makeMember("0.8.0"),
},
ver: version.Must(version.NewVersion("0.7.5")),
expected: true,
},
// Multiple servers, doesn't meet req version
{
members: []serf.Member{
makeMember("0.7.5"),
makeMember("0.8.0"),
},
ver: version.Must(version.NewVersion("0.8.0")),
expected: false,
},
}
for _, tc := range cases {
result := ServersMeetMinimumVersion(tc.members, tc.ver)
if result != tc.expected {
t.Fatalf("bad: %v, %v, %v", result, tc.ver.String(), tc)
}
}
}
func TestShuffleStrings(t *testing.T) {
t.Parallel()
// Generate input

View File

@ -448,7 +448,7 @@ func (w *Worker) ReblockEval(eval *structs.Evaluation) error {
// Update the evaluation if the queued jobs is not same as what is
// recorded in the job summary
ws := memdb.NewWatchSet()
summary, err := w.srv.fsm.state.JobSummaryByID(ws, eval.JobID)
summary, err := w.srv.fsm.state.JobSummaryByID(ws, eval.Namespace, eval.JobID)
if err != nil {
return fmt.Errorf("couldn't retrieve job summary: %v", err)
}

View File

@ -56,6 +56,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
j1, j2 := mock.Job(), mock.Job()
alloc1 := &structs.Allocation{
ID: structs.GenerateUUID(),
Namespace: structs.DefaultNamespace,
EvalID: structs.GenerateUUID(),
NodeID: nodes[0].Node.ID,
JobID: j1.ID,
@ -70,6 +71,7 @@ func TestEvalContext_ProposedAlloc(t *testing.T) {
}
alloc2 := &structs.Allocation{
ID: structs.GenerateUUID(),
Namespace: structs.DefaultNamespace,
EvalID: structs.GenerateUUID(),
NodeID: nodes[1].Node.ID,
JobID: j2.ID,

View File

@ -452,6 +452,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
job := &structs.Job{
ID: "foo",
Namespace: structs.DefaultNamespace,
Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
TaskGroups: []*structs.TaskGroup{tg1, tg2},
}
@ -461,6 +462,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -469,6 +471,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: "ignore 2",
Job: job,
@ -477,6 +480,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
}
plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -485,6 +489,7 @@ func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -521,6 +526,7 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
job := &structs.Job{
ID: "foo",
Namespace: structs.DefaultNamespace,
Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
}
@ -530,6 +536,7 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
ID: structs.GenerateUUID(),
@ -537,6 +544,7 @@ func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
}
plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
ID: structs.GenerateUUID(),
@ -575,6 +583,7 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "foo",
},
@ -584,6 +593,7 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
// different job.
plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "bar",
},
@ -591,7 +601,10 @@ func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
proposed := NewDistinctHostsIterator(ctx, static)
proposed.SetTaskGroup(tg1)
proposed.SetJob(&structs.Job{ID: "foo"})
proposed.SetJob(&structs.Job{
ID: "foo",
Namespace: structs.DefaultNamespace,
})
out := collectFeasible(proposed)
if len(out) != 1 {
@ -644,6 +657,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
job := &structs.Job{
ID: "foo",
Namespace: structs.DefaultNamespace,
Constraints: []*structs.Constraint{
{
Operand: structs.ConstraintDistinctProperty,
@ -660,6 +674,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
alloc1ID := structs.GenerateUUID()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -669,6 +684,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: "ignore 2",
Job: job,
@ -678,6 +694,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
}
plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -687,6 +704,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -699,6 +717,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
stoppingAllocID := structs.GenerateUUID()
plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -711,6 +730,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
// Have one of the allocations exist in both the plan and the state
// store. This resembles an allocation update
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -720,6 +740,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -730,6 +751,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: "ignore 2",
Job: job,
@ -738,6 +760,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
NodeID: nodes[1].ID,
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -748,6 +771,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -756,6 +780,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
NodeID: nodes[3].ID,
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -810,6 +835,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
job := &structs.Job{
ID: "foo",
Namespace: structs.DefaultNamespace,
Constraints: []*structs.Constraint{
{
Operand: structs.ConstraintDistinctProperty,
@ -827,6 +853,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
alloc1ID := structs.GenerateUUID()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -835,6 +862,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -844,6 +872,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: "ignore 2",
Job: job,
@ -853,6 +882,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
}
plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -861,6 +891,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -870,6 +901,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -879,6 +911,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
}
plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -888,6 +921,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -900,6 +934,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
stoppingAllocID := structs.GenerateUUID()
plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -912,6 +947,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
// Have one of the allocations exist in both the plan and the state
// store. This resembles an allocation update
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -921,6 +957,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -930,6 +967,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -940,6 +978,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -948,6 +987,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
NodeID: nodes[1].ID,
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: "ignore 2",
Job: job,
@ -995,6 +1035,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin
// Create a job with a distinct_property constraint and a task groups.
tg1 := &structs.TaskGroup{Name: "bar"}
job := &structs.Job{
Namespace: structs.DefaultNamespace,
ID: "foo",
Constraints: []*structs.Constraint{
{
@ -1008,6 +1049,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1019,6 +1061,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin
stoppingAllocID := structs.GenerateUUID()
plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1029,6 +1072,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testin
upserting := []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1079,6 +1123,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
tg3 := &structs.TaskGroup{Name: "bam"}
job := &structs.Job{
Namespace: structs.DefaultNamespace,
ID: "foo",
Constraints: []*structs.Constraint{
{
@ -1094,6 +1139,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1103,6 +1149,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
}
upserting := []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -1153,6 +1200,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin
tg3 := &structs.TaskGroup{Name: "bam"}
job := &structs.Job{
Namespace: structs.DefaultNamespace,
ID: "foo",
Constraints: []*structs.Constraint{
{
@ -1169,6 +1217,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1176,6 +1225,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin
NodeID: nodes[0].ID,
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -1185,6 +1235,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin
}
upserting := []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1193,6 +1244,7 @@ func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testin
NodeID: nodes[1].ID,
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg2.Name,
JobID: job.ID,
Job: job,
@ -1251,6 +1303,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
tg2 := &structs.TaskGroup{Name: "baz"}
job := &structs.Job{
Namespace: structs.DefaultNamespace,
ID: "foo",
TaskGroups: []*structs.TaskGroup{tg1, tg2},
}
@ -1261,6 +1314,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
plan := ctx.Plan()
plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1273,6 +1327,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
stoppingAllocID := structs.GenerateUUID()
plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1283,6 +1338,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
upserting := []*structs.Allocation{
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,
@ -1293,6 +1349,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
// Should be ignored as it is a different job.
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: "ignore 2",
Job: job,
@ -1302,6 +1359,7 @@ func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
},
&structs.Allocation{
Namespace: structs.DefaultNamespace,
TaskGroup: tg1.Name,
JobID: job.ID,
Job: job,

View File

@ -191,7 +191,7 @@ func (s *GenericScheduler) process() (bool, error) {
// Lookup the Job by ID
var err error
ws := memdb.NewWatchSet()
s.job, err = s.state.JobByID(ws, s.eval.JobID)
s.job, err = s.state.JobByID(ws, s.eval.Namespace, s.eval.JobID)
if err != nil {
return false, fmt.Errorf("failed to get job %q: %v", s.eval.JobID, err)
}
@ -208,7 +208,7 @@ func (s *GenericScheduler) process() (bool, error) {
if !s.batch {
// Get any existing deployment
s.deployment, err = s.state.LatestDeploymentByJobID(ws, s.eval.JobID)
s.deployment, err = s.state.LatestDeploymentByJobID(ws, s.eval.Namespace, s.eval.JobID)
if err != nil {
return false, fmt.Errorf("failed to get job deployment %q: %v", s.eval.JobID, err)
}
@ -365,7 +365,7 @@ func (s *GenericScheduler) filterCompleteAllocs(allocs []*structs.Allocation) ([
func (s *GenericScheduler) computeJobAllocs() error {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
allocs, err := s.state.AllocsByJob(ws, s.eval.JobID, true)
allocs, err := s.state.AllocsByJob(ws, s.eval.Namespace, s.eval.JobID, true)
if err != nil {
return fmt.Errorf("failed to get allocs for job '%s': %v",
s.eval.JobID, err)
@ -517,6 +517,7 @@ func (s *GenericScheduler) computePlacements(destructive, place []placementResul
// Create an allocation for this
alloc := &structs.Allocation{
ID: structs.GenerateUUID(),
Namespace: s.job.Namespace,
EvalID: s.eval.ID,
Name: missing.Name(),
JobID: s.job.ID,
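
Taken together, the generic scheduler hunks above scope every state lookup by the evaluation's namespace and stamp new placements with the job's namespace. A minimal sketch of that flow against the scheduler package's State interface (shown further down); the helper name is illustrative, not part of this change:

// Sketch only: condenses the namespaced lookups introduced above.
func lookupForEval(state State, eval *structs.Evaluation) (*structs.Allocation, error) {
	ws := memdb.NewWatchSet()

	job, err := state.JobByID(ws, eval.Namespace, eval.JobID)
	if err != nil {
		return nil, fmt.Errorf("failed to get job %q: %v", eval.JobID, err)
	}
	if _, err := state.LatestDeploymentByJobID(ws, eval.Namespace, eval.JobID); err != nil {
		return nil, fmt.Errorf("failed to get job deployment %q: %v", eval.JobID, err)
	}
	if _, err := state.AllocsByJob(ws, eval.Namespace, eval.JobID, true); err != nil {
		return nil, fmt.Errorf("failed to get allocs for job %q: %v", eval.JobID, err)
	}

	// New placements inherit their namespace from the job being scheduled.
	return &structs.Allocation{
		ID:        structs.GenerateUUID(),
		Namespace: job.Namespace,
		EvalID:    eval.ID,
		JobID:     job.ID,
		Job:       job,
	}, nil
}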

View File

@ -29,6 +29,7 @@ func TestServiceSched_JobRegister(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -71,7 +72,7 @@ func TestServiceSched_JobRegister(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -111,6 +112,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -141,6 +143,7 @@ func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
// Create a mock evaluation to handle the update
eval = &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -197,6 +200,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -236,7 +240,7 @@ func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure only one allocation was placed
@ -265,6 +269,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -305,7 +310,7 @@ func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -353,6 +358,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -398,7 +404,7 @@ func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -444,6 +450,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -483,7 +490,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -535,6 +542,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T)
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -563,7 +571,7 @@ func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T)
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
assert.Nil(err, "AllocsByJob")
// Ensure all allocations placed
@ -587,6 +595,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -617,7 +626,7 @@ func TestServiceSched_JobRegister_Annotate(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -664,6 +673,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -683,7 +693,7 @@ func TestServiceSched_JobRegister_CountZero(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no allocations placed
@ -704,6 +714,7 @@ func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -785,6 +796,7 @@ func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -881,6 +893,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -910,7 +923,7 @@ func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) {
// Ensure two allocations placed
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
if len(out) != 2 {
t.Fatalf("bad: %#v", out)
@ -956,6 +969,7 @@ func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) {
// Create a mock blocked evaluation
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Status: structs.EvalStatusBlocked,
Priority: job.Priority,
@ -996,6 +1010,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1030,7 +1045,7 @@ func TestServiceSched_Plan_Partial_Progress(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure only one allocation was placed
@ -1055,6 +1070,7 @@ func TestServiceSched_EvaluateBlockedEval(t *testing.T) {
// Create a mock blocked evaluation
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Status: structs.EvalStatusBlocked,
Priority: job.Priority,
@ -1105,6 +1121,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) {
// Create a mock blocked evaluation
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Status: structs.EvalStatusBlocked,
Priority: job.Priority,
@ -1151,7 +1168,7 @@ func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -1222,6 +1239,7 @@ func TestServiceSched_JobModify(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1260,7 +1278,7 @@ func TestServiceSched_JobModify(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -1305,6 +1323,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1352,7 +1371,7 @@ func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -1411,6 +1430,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1449,7 +1469,7 @@ func TestServiceSched_JobModify_CountZero(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -1504,6 +1524,7 @@ func TestServiceSched_JobModify_Rolling(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1608,6 +1629,7 @@ func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) {
noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1708,6 +1730,7 @@ func TestServiceSched_JobModify_Canaries(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1815,6 +1838,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1858,7 +1882,7 @@ func TestServiceSched_JobModify_InPlace(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -1928,6 +1952,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1973,7 +1998,7 @@ func TestServiceSched_JobModify_DistinctProperty(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -2013,6 +2038,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -2038,7 +2064,7 @@ func TestServiceSched_JobDeregister_Purged(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure that the job field on the allocation is still populated
@ -2079,6 +2105,7 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -2104,7 +2131,7 @@ func TestServiceSched_JobDeregister_Stopped(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure that the job field on the allocation is still populated
@ -2167,6 +2194,7 @@ func TestServiceSched_NodeDown(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2232,6 +2260,7 @@ func TestServiceSched_NodeUpdate(t *testing.T) {
// Create a mock evaluation which won't trigger any new placements
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2282,6 +2311,7 @@ func TestServiceSched_NodeDrain(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2317,7 +2347,7 @@ func TestServiceSched_NodeDrain(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -2382,6 +2412,7 @@ func TestServiceSched_NodeDrain_Down(t *testing.T) {
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2455,6 +2486,7 @@ func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2511,6 +2543,7 @@ func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -2569,6 +2602,7 @@ func TestServiceSched_RetryLimit(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2588,7 +2622,7 @@ func TestServiceSched_RetryLimit(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no allocations placed
@ -2623,6 +2657,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2642,7 +2677,7 @@ func TestBatchSched_Run_CompleteAlloc(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no allocations placed
@ -2677,6 +2712,7 @@ func TestBatchSched_Run_DrainedAlloc(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2696,7 +2732,7 @@ func TestBatchSched_Run_DrainedAlloc(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure a replacement alloc was placed.
@ -2730,6 +2766,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2749,7 +2786,7 @@ func TestBatchSched_Run_FailedAlloc(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure a replacement alloc was placed.
@ -2790,6 +2827,7 @@ func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2849,6 +2887,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
// Create a mock evaluation to rerun the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -2868,7 +2907,7 @@ func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no replacement alloc was placed.
@ -2916,6 +2955,7 @@ func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -3046,6 +3086,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -3074,6 +3115,7 @@ func TestGenericSched_ChainedAlloc(t *testing.T) {
// Create a mock evaluation to update the job
eval1 := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job1.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -3133,6 +3175,7 @@ func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -3190,6 +3233,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -3210,7 +3254,7 @@ func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
// Ensure the plan cancelled the existing deployment
ws := memdb.NewWatchSet()
out, err := h.State.LatestDeploymentByJobID(ws, job.ID)
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
noErr(t, err)
if out == nil {
@ -3258,6 +3302,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
// Create a mock evaluation to kick the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -3278,7 +3323,7 @@ func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
// Ensure the plan cancelled the existing deployment
ws := memdb.NewWatchSet()
out, err := h.State.LatestDeploymentByJobID(ws, job.ID)
out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
noErr(t, err)
if out == nil {

View File

@ -17,6 +17,9 @@ type propertySet struct {
// jobID is the job we are operating on
jobID string
// namespace is the namespace of the job we are operating on
namespace string
// taskGroup is optionally set if the constraint is for a task group
taskGroup string
@ -50,6 +53,7 @@ func NewPropertySet(ctx Context, job *structs.Job) *propertySet {
p := &propertySet{
ctx: ctx,
jobID: job.ID,
namespace: job.Namespace,
existingValues: make(map[string]uint64),
}
@ -109,7 +113,7 @@ func (p *propertySet) setConstraint(constraint *structs.Constraint, taskGroup st
func (p *propertySet) populateExisting(constraint *structs.Constraint) {
// Retrieve all previously placed allocations
ws := memdb.NewWatchSet()
allocs, err := p.ctx.State().AllocsByJob(ws, p.jobID, false)
allocs, err := p.ctx.State().AllocsByJob(ws, p.namespace, p.jobID, false)
if err != nil {
p.errorBuilding = fmt.Errorf("failed to get job's allocations: %v", err)
p.ctx.Logger().Printf("[ERR] scheduler.dynamic-constraint: %v", p.errorBuilding)

View File

@ -204,6 +204,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
// Add existing allocations
j1, j2 := mock.Job(), mock.Job()
alloc1 := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: nodes[0].Node.ID,
@ -218,6 +219,7 @@ func TestBinPackIterator_ExistingAlloc(t *testing.T) {
TaskGroup: "web",
}
alloc2 := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: nodes[1].Node.ID,
@ -291,6 +293,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
// Add existing allocations
j1, j2 := mock.Job(), mock.Job()
alloc1 := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: nodes[0].Node.ID,
@ -305,6 +308,7 @@ func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
TaskGroup: "web",
}
alloc2 := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: structs.GenerateUUID(),
NodeID: nodes[1].Node.ID,

View File

@ -66,7 +66,7 @@ type State interface {
Nodes(ws memdb.WatchSet) (memdb.ResultIterator, error)
// AllocsByJob returns the allocations by JobID
AllocsByJob(ws memdb.WatchSet, jobID string, all bool) ([]*structs.Allocation, error)
AllocsByJob(ws memdb.WatchSet, namespace, jobID string, all bool) ([]*structs.Allocation, error)
// AllocsByNode returns all the allocations by node
AllocsByNode(ws memdb.WatchSet, node string) ([]*structs.Allocation, error)
@ -78,11 +78,11 @@ type State interface {
NodeByID(ws memdb.WatchSet, nodeID string) (*structs.Node, error)
// GetJobByID is used to lookup a job by ID
JobByID(ws memdb.WatchSet, id string) (*structs.Job, error)
JobByID(ws memdb.WatchSet, namespace, id string) (*structs.Job, error)
// LatestDeploymentByJobID returns the latest deployment matching the given
// job ID
LatestDeploymentByJobID(ws memdb.WatchSet, jobID string) (*structs.Deployment, error)
LatestDeploymentByJobID(ws memdb.WatchSet, namespace, jobID string) (*structs.Deployment, error)
}
// Planner interface is used to submit a task allocation plan.
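
The interface hunk above is the contract the scheduler changes compile against: every ID-keyed lookup now takes the namespace as its second argument. Call sites with a job or evaluation in hand pass job.Namespace or eval.Namespace, and the tests in this commit pass structs.DefaultNamespace explicitly. A minimal, hypothetical call-site sketch (the helper name and parameters are placeholders):

// Sketch only: lookups are keyed by (namespace, job ID) rather than job ID alone.
func namespacedLookup(state State, namespace, jobID string) ([]*structs.Allocation, error) {
	ws := memdb.NewWatchSet()
	job, err := state.JobByID(ws, namespace, jobID)
	if err != nil || job == nil {
		return nil, err
	}
	return state.AllocsByJob(ws, job.Namespace, job.ID, false)
}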

View File

@ -90,7 +90,7 @@ func (s *SystemScheduler) process() (bool, error) {
// Lookup the Job by ID
var err error
ws := memdb.NewWatchSet()
s.job, err = s.state.JobByID(ws, s.eval.JobID)
s.job, err = s.state.JobByID(ws, s.eval.Namespace, s.eval.JobID)
if err != nil {
return false, fmt.Errorf("failed to get job '%s': %v",
s.eval.JobID, err)
@ -182,7 +182,7 @@ func (s *SystemScheduler) process() (bool, error) {
func (s *SystemScheduler) computeJobAllocs() error {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
allocs, err := s.state.AllocsByJob(ws, s.eval.JobID, true)
allocs, err := s.state.AllocsByJob(ws, s.eval.Namespace, s.eval.JobID, true)
if err != nil {
return fmt.Errorf("failed to get allocs for job '%s': %v",
s.eval.JobID, err)
@ -307,6 +307,7 @@ func (s *SystemScheduler) computePlacements(place []allocTuple) error {
// Create an allocation for this
alloc := &structs.Allocation{
ID: structs.GenerateUUID(),
Namespace: s.job.Namespace,
EvalID: s.eval.ID,
Name: missing.Name,
JobID: s.job.ID,

View File

@ -26,6 +26,7 @@ func TestSystemSched_JobRegister(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -60,7 +61,7 @@ func TestSystemSched_JobRegister(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -98,6 +99,7 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -126,6 +128,7 @@ func TestSystemeSched_JobRegister_StickyAllocs(t *testing.T) {
// Create a mock evaluation to handle the update
eval = &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -172,6 +175,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -185,7 +189,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -197,6 +201,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
h1 := NewHarnessWithState(t, h.State)
// Create a mock evaluation to register the job
eval1 := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job1.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -208,7 +213,7 @@ func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
t.Fatalf("err: %v", err)
}
out, err = h1.State.AllocsByJob(ws, job1.ID, false)
out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false)
noErr(t, err)
if len(out) != 0 {
t.Fatalf("bad: %#v", out)
@ -230,6 +235,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: svcJob.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -248,6 +254,7 @@ func TestSystemSched_ExhaustResources(t *testing.T) {
// Create a mock evaluation to register the job
eval1 := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -293,6 +300,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -323,7 +331,7 @@ func TestSystemSched_JobRegister_Annotate(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -391,6 +399,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -435,7 +444,7 @@ func TestSystemSched_JobRegister_AddNode(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -457,6 +466,7 @@ func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -526,6 +536,7 @@ func TestSystemSched_JobModify(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -564,7 +575,7 @@ func TestSystemSched_JobModify(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -616,6 +627,7 @@ func TestSystemSched_JobModify_Rolling(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -710,6 +722,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -753,7 +766,7 @@ func TestSystemSched_JobModify_InPlace(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure all allocations placed
@ -803,6 +816,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -830,7 +844,7 @@ func TestSystemSched_JobDeregister_Purged(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no remaining allocations
@ -874,6 +888,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerJobDeregister,
@ -901,7 +916,7 @@ func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no remaining allocations
@ -934,6 +949,7 @@ func TestSystemSched_NodeDown(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -998,6 +1014,7 @@ func TestSystemSched_NodeDrain_Down(t *testing.T) {
// Create a mock evaluation to deal with the node update
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -1056,6 +1073,7 @@ func TestSystemSched_NodeDrain(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -1118,6 +1136,7 @@ func TestSystemSched_NodeUpdate(t *testing.T) {
// Create a mock evaluation to deal
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -1155,6 +1174,7 @@ func TestSystemSched_RetryLimit(t *testing.T) {
// Create a mock evaluation to deregister the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1174,7 +1194,7 @@ func TestSystemSched_RetryLimit(t *testing.T) {
// Lookup the allocations by JobID
ws := memdb.NewWatchSet()
out, err := h.State.AllocsByJob(ws, job.ID, false)
out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
noErr(t, err)
// Ensure no allocations placed
@ -1203,6 +1223,7 @@ func TestSystemSched_Queued_With_Constraints(t *testing.T) {
// Create a mock evaluation to deal
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -1237,6 +1258,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
// Create a mock evaluation to register the job
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1271,6 +1293,7 @@ func TestSystemSched_ChainedAlloc(t *testing.T) {
// Create a mock evaluation to update the job
eval1 := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: job1.Priority,
TriggeredBy: structs.EvalTriggerJobRegister,
@ -1359,6 +1382,7 @@ func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,
@ -1429,6 +1453,7 @@ func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
// Create a mock evaluation to deal with drain
eval := &structs.Evaluation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
Priority: 50,
TriggeredBy: structs.EvalTriggerNodeUpdate,

View File

@ -731,6 +731,7 @@ func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
// Register an alloc
alloc := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: eval.ID,
NodeID: node.ID,
@ -779,6 +780,7 @@ func TestInplaceUpdate_NoMatch(t *testing.T) {
// Register an alloc
alloc := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: eval.ID,
NodeID: node.ID,
@ -826,6 +828,7 @@ func TestInplaceUpdate_Success(t *testing.T) {
// Register an alloc
alloc := &structs.Allocation{
Namespace: structs.DefaultNamespace,
ID: structs.GenerateUUID(),
EvalID: eval.ID,
NodeID: node.ID,